Document
stringlengths 395
24.5k
| Source
stringclasses 6
values |
|---|---|
//
// Created by Aidan on 4/26/2020.
//
#include "mylibrary/texture_sheet.h"
namespace mylibrary {
TextureSheet::TextureSheet(int width, int height,
                           std::vector<Coordinate> texture_coordinates,
                           std::string path) {
  // Loads a sprite sheet from the given asset path and builds one texture
  // per requested coordinate, each clipped (via clean bounds) to a single
  // width x height cell of the sheet.
  //
  // Throws -1 if any coordinate's cell would extend past the image edge.
  // NOTE(review): throwing a bare int is unidiomatic (std::out_of_range
  // would be clearer), but it is preserved because existing callers may be
  // catching `int`.
  cinder::ImageSourceRef full_image = cinder::loadImage(
      cinder::app::loadAsset(path));

  // The texture format is identical for every cell, so configure it once
  // instead of rebuilding it on each loop iteration.
  cinder::gl::Texture::Format fmt;
  fmt.enableMipmapping();
  // Nearest-neighbour filtering keeps pixel art crisp (no smoothing).
  fmt.setMinFilter( GL_NEAREST_MIPMAP_NEAREST );
  fmt.setMagFilter( GL_NEAREST );
  // Clamp so sampling never wraps into neighbouring cells of the sheet.
  fmt.setWrap( GL_CLAMP, GL_CLAMP );

  // Image dimensions are loop-invariant; query them once.
  const int image_width = full_image->getWidth();
  const int image_height = full_image->getHeight();

  for (const Coordinate& coordinate : texture_coordinates) {
    // Reject coordinates whose cell lies (partly) outside the image.
    if ((coordinate.x + 1) * width > image_width ||
        (coordinate.y + 1) * height > image_height) {
      throw -1;
    }
    cinder::gl::TextureRef texture = cinder::gl::Texture::create(full_image,
                                                                 fmt);
    // Restrict this texture's clean bounds to exactly one cell.
    cinder::Area texture_area(coordinate.x * width,
                              coordinate.y * height,
                              (coordinate.x + 1) * width,
                              (coordinate.y + 1) * height);
    texture->setCleanBounds(texture_area);
    textures_.push_back(texture);
  }
}
// Default constructor: an empty sheet with no textures; members
// (the textures_ vector) are value-initialized.
TextureSheet::TextureSheet() = default;
cinder::gl::TextureRef& TextureSheet::Get(int i) {
  // Bounds-checked access: std::vector::at throws std::out_of_range for
  // an invalid index, so callers never receive a dangling reference.
  auto& texture = textures_.at(i);
  return texture;
}
int TextureSheet::Size() {
  // Number of textures extracted from the sheet. The cast makes the
  // size_t -> int narrowing explicit (silences conversion warnings; a
  // texture sheet is far smaller than INT_MAX in practice).
  return static_cast<int>(textures_.size());
}
}
|
STACK_EDU
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 14 18:22:37 2017
@author: gianni
"""
from pythonradex import escape_probability
import numpy as np
from scipy import constants
# Shared fixtures for the flux tests: a point-like (0D) model and a
# uniform-sphere model are exercised with the same checks below.
flux0D = escape_probability.Flux0D()
flux_uniform_sphere = escape_probability.FluxUniformSphere()
all_fluxes = [flux0D,flux_uniform_sphere]
# Optical depth large enough that emission is effectively optically thick.
large_tau_nu = 5e2
# Solid angle of a source of 100 au extent seen from a distance of 1 pc.
solid_angle = (100*constants.au)**2/(1*constants.parsec)**2
def test_fluxes():
    """Zero optical depth or a zero source function must give zero flux;
    optically thick emission must approach source_function * solid_angle."""
    zero_inputs = (0, np.zeros(5))
    thick_inputs = (large_tau_nu, np.ones(5)*large_tau_nu)
    for flux in all_fluxes:
        for zero in zero_inputs:
            no_tau = flux.compute_flux_nu(tau_nu=zero, source_function=1,
                                          solid_angle=solid_angle)
            no_source = flux.compute_flux_nu(tau_nu=1, source_function=zero,
                                             solid_angle=solid_angle)
            assert np.all(no_tau == 0)
            assert np.all(no_source == 0)
        for thick_tau in thick_inputs:
            source_function = 1
            observed = flux.compute_flux_nu(tau_nu=thick_tau,
                                            source_function=source_function,
                                            solid_angle=solid_angle)
            assert np.allclose(observed, source_function*solid_angle,
                               rtol=1e-3, atol=0)
def test_flux_uniform_sphere():
    """The Taylor-expansion branch and the analytical branch of the
    uniform-sphere flux must agree close to the switch point min_tau_nu."""
    switch_tau = flux_uniform_sphere.min_tau_nu
    delta = 0.01*switch_tau
    source_function = 1
    # Just below the switch point the Taylor branch is used, just above it
    # the analytical expression; both sides must match to within 5%.
    below_switch = flux_uniform_sphere.compute_flux_nu(
        tau_nu=switch_tau-delta, source_function=source_function,
        solid_angle=solid_angle)
    above_switch = flux_uniform_sphere.compute_flux_nu(
        tau_nu=switch_tau+delta, source_function=source_function,
        solid_angle=solid_angle)
    assert np.isclose(below_switch, above_switch, rtol=0.05, atol=0)
def test_esc_prob_uniform_sphere():
    """Limits of the uniform-sphere escape probability beta."""
    prob = escape_probability.EscapeProbabilityUniformSphere()
    # Optically thin limit: every photon escapes (beta == 1), for scalars
    # and arrays alike.
    assert prob.beta(0) == 1
    assert np.all(prob.beta(np.zeros(4)) == np.ones(4))
    # Optically thick limit: essentially nothing escapes.
    assert np.isclose(prob.beta(large_tau_nu), 0, atol=1e-2, rtol=0)
    assert np.allclose(prob.beta(np.ones(4)*large_tau_nu), np.zeros(4),
                       atol=1e-2, rtol=0)
    # Analytical and Taylor branches must agree at the switch point.
    assert np.isclose(prob.beta_analytical(prob.tau_epsilon),
                      prob.beta_Taylor(prob.tau_epsilon), rtol=1e-2, atol=0)
    # Negative tau: small |tau| gives beta ~ 1, strongly negative tau gives
    # a value close to 0 (same assertions as for positive thick tau).
    assert np.isclose(prob.beta(-1e-2), 1, atol=0, rtol=1e-2)
    assert np.isclose(prob.beta(-large_tau_nu), 0, atol=1e-2, rtol=0)
# Geometry instances shared by the tests below.
uniform_sphere = escape_probability.UniformSphere()
radex_uniform_sphere = escape_probability.UniformSphereRADEX()
uniform_slab = escape_probability.UniformFaceOnSlab()
radex_uniform_slab = escape_probability.UniformShockSlabRADEX()
def test_uniform_slab_interpolation():
    """Interpolated integral term must match the direct computation,
    including for tau values beyond the precomputed grid."""
    tau_samples = np.logspace(-4, 4, 100)
    # The sample range must extend past both ends of the interpolation grid
    # so that extrapolation behaviour is covered as well.
    assert np.min(tau_samples) < np.min(uniform_slab.tau_grid)
    assert np.max(tau_samples) > np.max(uniform_slab.tau_grid)
    interpolated = uniform_slab.interpolated_integral_term(tau_samples)
    direct = [uniform_slab.integral_term(tau) for tau in tau_samples]
    assert np.allclose(interpolated, direct, rtol=1e-2, atol=0)
# Geometries whose beta switches between a Taylor expansion and an
# analytical expression depending on tau.
# NOTE(review): "gemoetries" is a typo for "geometries"; kept as-is to avoid
# renaming a module-level name.
taylor_gemoetries = [uniform_sphere,radex_uniform_sphere,radex_uniform_slab]

def test_taylor_geometries():
    """Check that the Taylor and analytical branches agree at and around
    the switch points, for positive and negative tau."""
    limit_tau_nu = escape_probability.TaylorEscapeProbability.tau_epsilon
    epsilon_tau_nu = 0.01*limit_tau_nu
    # tau values that sit exactly on the boundaries between regions.
    special_tau_nu_values = [escape_probability.TaylorEscapeProbability.min_tau,
                             -escape_probability.TaylorEscapeProbability.tau_epsilon,
                             escape_probability.TaylorEscapeProbability.tau_epsilon]
    # Negative tau values between min_tau and just below -tau_epsilon.
    negative_tau_samples = np.linspace(escape_probability.TaylorEscapeProbability.min_tau,
                                       -1.01*escape_probability.TaylorEscapeProbability.tau_epsilon,
                                       10)
    for geo in taylor_gemoetries:
        # Continuity across the positive switch point tau_epsilon.
        prob_Taylor = geo.beta_Taylor(limit_tau_nu-epsilon_tau_nu)
        prob_analytical = geo.beta_analytical(limit_tau_nu+epsilon_tau_nu)
        prob = geo.beta(limit_tau_nu)
        for p in [prob_Taylor,prob_analytical]:
            assert np.isclose(p,prob,rtol=1e-2)
        # Just above -tau_epsilon the Taylor branch must be selected.
        assert geo.beta(-limit_tau_nu+epsilon_tau_nu)\
                == geo.beta_Taylor(-limit_tau_nu+epsilon_tau_nu)
        # Strongly negative tau: beta close to 0.
        assert np.isclose(geo.beta(-large_tau_nu),0,rtol=0,atol=1e-2)
        # Between min_tau and -tau_epsilon the analytical branch applies.
        for neg_tau in negative_tau_samples:
            assert geo.beta(neg_tau) == geo.beta_analytical(neg_tau)
        # Inside (-tau_epsilon, 0) the Taylor branch applies.
        assert geo.beta(-escape_probability.TaylorEscapeProbability.tau_epsilon/2)\
                == geo.beta_Taylor(-escape_probability.TaylorEscapeProbability.tau_epsilon/2)
        # Check that everything works at the points that limit the different
        # regions (smoke test: just evaluate, no expected value asserted):
        for spec_tau_nu_value in special_tau_nu_values:
            geo.beta(spec_tau_nu_value)
def test_uniform_slab_negative_tau():
    """For the face-on slab, any negative optical depth yields beta == 1."""
    slab = escape_probability.UniformFaceOnSlab()
    for tau in (-0.01, -1, -5, -10):
        assert slab.beta(tau) == 1
all_geometries = [uniform_sphere,radex_uniform_sphere,uniform_slab,radex_uniform_slab]

def test_all_geometries():
    """Sanity checks common to every geometry: beta(0) == 1, beta ~ 0 for
    optically thick gas, and beta ~ 1 for |tau| << 1 of either sign."""
    n = 10
    zero_taus = (0, np.zeros(n))
    thick_taus = (large_tau_nu, np.ones(n)*large_tau_nu)
    thin_taus = (1e-3, np.ones(n)*1e-3)
    for geo in all_geometries:
        for tau in zero_taus:
            assert np.all(geo.beta(tau) == 1)
        for tau in thick_taus:
            assert np.allclose(geo.beta(tau), 0, atol=1e-2, rtol=0)
        for tau in thin_taus:
            # Both small positive and small negative tau give beta ~ 1.
            assert np.allclose(geo.beta(tau), 1, atol=0, rtol=1e-2)
            assert np.allclose(geo.beta(-tau), 1, atol=0, rtol=1e-2)
|
STACK_EDU
|
python中erpc_test文件不存在
python中 import erpc_test不存在该文件
Is this ticket related to missing erpc_test module here? https://github.com/EmbeddedRPC/erpc/blob/1bb6e22a7a1f7e21eb300dd547ddfd6c15ae8d9f/erpc_python/test.py#L16
Hi @Peanut0709 , could you be more specific, please? Thanks.
嗨@Peanut0709,你能更具体点吗?谢谢。
test.py这个文件import erpc_test 但是 整个工程里面没有erpc_test这个文件所以会报错
嗨@Peanut0709,你能更具体点吗?谢谢。
运行后显示 No module named 'erpc_test'
此票是否与此处缺少erpc_test模块有关?
https://github.com/EmbeddedRPC/erpc/blob/1bb6e22a7a1f7e21eb300dd547ddfd6c15ae8d9f/erpc_python/test.py#L16
after installing setup.py when you execute test.py
it says "no module name erpc_test" what is its solution ?
It looks like files like test.py, run_test.sh are obsolete or not all files were published. If they are obsolete, the README in the same folder should also be updated
Instead you can try this example which i tried and it is working:
https://github.com/EmbeddedRPC/erpc/tree/develop/examples/matrix_multiply_tcp_python
It looks like files like test.py, run_test.sh are obsolite or not all files were published. If they are obsolite also README in same folder should be updated
Thank you, @Hadatko , yes these files are obsolete and I will delete them.
@Peanut0709 , @drumairali , please use the python example @Hadatko is referencing.
Can I get any example in which in which from python remote procedure call
is made and on C side it executes and returns results back on python side?.
On Mon, Sep 13, 2021 at 1:38 PM Michal Princ @.***>
wrote:
It looks like files like test.py, run_test.sh are obsolite or not all
files were published. If they are obsolite also README in same folder
should be updated
Thank you, @Hadatko https://github.com/Hadatko , yes these files are
obsolete and I will delete them.
@Peanut0709 https://github.com/Peanut0709 , @drumairali
https://github.com/drumairali , please use the python example @Hadatko
https://github.com/Hadatko is referencing.
—
You are receiving this because you were mentioned.
Reply to this email directly, view it on GitHub
https://github.com/EmbeddedRPC/erpc/issues/186#issuecomment-917968507,
or unsubscribe
https://github.com/notifications/unsubscribe-auth/ANPKK4HCX43RNGDKWA7YR7LUBW2BJANCNFSM456I6GFA
.
Triage notifications on the go with GitHub Mobile for iOS
https://apps.apple.com/app/apple-store/id1477376905?ct=notification-email&mt=8&pt=524675
or Android
https://play.google.com/store/apps/details?id=com.github.android&referrer=utm_campaign%3Dnotification-email%26utm_medium%3Demail%26utm_source%3Dgithub.
Hello @drumairali I am afraid we do not have such an example (combining Python on one side and the C on the other side) in the eRPC repo.
BUT, there are such examples in NXP MCUXpressoSDK packages for selected boards. Once downloading for instance the FRDM-K64F or FRDM-K82F packages from the https://mcuxpresso.nxp.com you should find the erpc_client_matrix_multiply_uart example in <MCUXpressoSDK_install_dir>/boards/<board_name>/multiprocessor_examples that demonstrates usage of eRPC between PC (Python client impl.) and board (C server impl.) using UART transport layer. The run_uart.py is located in <MCUXpressoSDK_install_dir>/boards/<board_name>/multiprocessor_examples/erpc_common/erpc_matrix_multiply. See the associated readme for this example. Hope it helps.
like we do matrix multiplication in Python. Is there any working code with which we can do same in C. may be if i will have that C matrix multiplication code then on server side i can run C and on Client side i can run python and i can send the data from Py to C?
thank you so much,,, i wanted to use Raspberry Pi (python) and PC (C/C++) and i wanted to do serial communication using erpc. Right now i am able to do eRPC between Raspberry Pi and PC via rs232 cable but both sides are in python. I want to run a test program in which on PC i just want to do same matrix multiplication example in C.
so is there any way in which i can run C/C++ matrix multiplication program on PC using eRPC exactly like i can do in python (i need this python example(https://github.com/EmbeddedRPC/erpc/tree/develop/examples/matrix_multiply_tcp_python) in C/C++ ). This example has option for both Serial and TCP transport bdw. Can i do it on ubuntu in C/C++ because i have been on it for last 15+ days and no outcome..
Hello @drumairali , your use case is valid but not demonstrated in any example. You have to build it by your own. When intended to use the serial transport on PC, look at and add into your project the erpc_serial_transport.cpp, there is also the C setup function once you would use C code, but this seems to be not used/modified a long time (erpc_setup_serial.cpp).
I would also recommend to have a look at unit_test_serial_client.cpp / unit_test_serial_server.cpp to see how to setup erpc with serial transport in C++ code. Hope it helps.
thank you so much, i appreciate your quick response... i try it..
It looks like this thread can be closed
|
GITHUB_ARCHIVE
|
The Smallest Snake In The World
The world’s tiniest snake is the Barbados threadsnake (Leptotyphlops carlae).
The smallest snake species is the Barbados threadsnake, officially known as Tetracheilostoma carlae. It is a member of the Leptotyphlopidae family. The little snake, found in 2008, weighs 0.02 ounces on average and measures 3.94 inches in length, with the largest specimen yet described reaching 4.09 inches. The little snake resembles a glossy earthworm and is gray, pink, or mauve in color. The snake is claimed to be as wide as a spaghetti noodle and may be mistaken for a worm.
Habitat and Range
The snake is thought to live in the Caribbean island of Barbados, most likely in the eastern forests. According to reports, the snake species can also be found on the Antigua and Barbuda islands. The first specimen was discovered in Barbados’ woods under rocks. Their home range is limited to a few kilometers of secondary woods.
This snake is oviparous, meaning it deposits eggs to reproduce. Females lay only one egg at a time, and when it hatches, a small youngster about half the mother’s length emerges. All little snake species have kids that are exceptionally large in relation to the adults. Children of large snakes are around one-tenth the size of their parents, but children of small snakes, such as Barbados threadsnakes, are roughly one-half the size of their parents. The eggs laid by small snakes are also huge in comparison to the parent’s size. A little egg the size and form of a long grain of rice is laid by the Barbados threadsnake. Other snakes lay eggs that are more spherical.
Although little is known about the Barbados threadsnake, it is thought to feed mostly on soil-borne ants, termites, and other insect larvae. This eating behavior is a byproduct of its burrowing behavior.
READ MORE: Top 10 Most Beautiful Flowers in the World
Little is known about this snake’s distribution, abundance, or ecology. Experts worry they are endangered, if not extinct, because most of Barbados’ original forest has been destroyed. Because much of the forest in Barbados’ eastern half has been converted to farming, it only accounts for 2% of the original cover. These species are considered to need forest environments to live. The IUCN red list classifies the species as severely endangered.
What is the Smallest Snake in the World?
The Barbados threadsnake, technically known as Tetracheilostoma carlae, is the smallest snake species. It belongs to the Leptotyphlopidae family. The little snake, found in 2008, weighs 0.02 ounces on average and measures 3.94 inches in length, with the largest specimen yet described reaching 4.09 inches.
|
OPCFW_CODE
|
You can easily boot the Pi headless without a display - but
obviously to CLI not to a GUI.
If you want to boot to GUI without a display (e.g. to use VNC) you need to configure the Pi4 to use a default resolution. See https://raspberrypi.stackexchange.com/a/113873/8697
If you want to use a monitor it is preferable to configure with the monitor attached (so ...
As I understand it, the tool you accidentally cancelled is known as piwiz.desktop (also known as the "First-run Wizard"), and it is located here: /etc/xdg/autostart/piwiz.desktop.
You might first want to check to see if it's still there, and re-start it if you can.
Another alternative is to re-flash your SD card using rpi-imager. Follow these ...
This sounds like a classic signal integrity problem.
If you have different cables, test those, perhaps you'll find one that your TV likes better than others. You may also consider buying a new one, especially if the ones you have are cheap, thin and no-name. As always, shorter cables work better than longer ones, especially if the cable is the root cause of ...
Assuming you want the whole screen to flash it is easy in pygame:
scr = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
A touchscreen has no relation to the display it is attached to, and has no concept of pixels. Its driver receives analog signals from the matrix and produces X and Y coordinates which are float values between 0 and 1, and sometimes (depending on the device type and the driver) a byte encoding the pressure. If you know the underlying screen resolution, you ...
Here are a few things to try based on several threads that I found regarding video related Pi 4 boot issues.
Update the firmware in the eeprom but make sure you have backups of anything important and stable power. Problems are rare but can have issues up to and including bricking the Pi. Be careful and make sure that you understand the process.
sudo apt ...
If you remove --filter nearest from the end of the xrandr line in /usr/share/dispsetup.sh it should resolve your issue (on my system it looks like xrandr --output HDMI-1 --scale 0.5x0.5, but yours may be slightly different).
Looks like this is a bug in raspi-config. It's caused by a newer version of xrandr that doesn't support the --filter option, which ...
How does the cronjob know what display this refers to? Cron does not inherit any X11 environment.
The display that is used by X11 is found in the variable DISPLAY. For example, I might get:
$ echo $DISPLAY
But in your cronjob, $DISPLAY is empty.
A cronjob is not the place where you do this.
If the set-up is specific for your session, you woud ...
As far as I remember, you need hdmi_force_hotplug=1 in your config.txt if the HDMI screen is not connected at boot time. Otherwise, if you start without an HDMI screen but later connect it, it will not work until you reboot.
Ground is Pin 9 just to make sure :P but yes that +
sudo nano /boot/config.txt
--> add the following line to the end of the file:
helped me a lot THX SmuPi
just to give another hint
Typically you want to see your device booting on the TFT. I.e. your boot console must be mapped to /dev/fb1.
Open /boot/cmdline.txt as root:
I see this question was asked almost 3 years back. I hope that this information will be useful to you.
Firstly: 2 to 4 inches is quite small for all that info. But up to you.
Down to the nitty-gritty: You need a driver circuit to send the update to the ePaper display. The Adafruit one mentioned by @Tom in the previous post has the driving circuit and ePaper ...
|
OPCFW_CODE
|
CS 1020 - Lab 4 - Sensor Calibration
One of the challenges with physical sensors is that, for a variety of reasons,
two "identical" sensors will give you different values in the exact same situation.
This can be problematic if you're using two sensors together like in our line following sciborgs or
if you have to change sensors. Even for good sensors, when we change setting, like from
the lab table to the hallway, the settings may need to change.
In this lab we are going to try and write some code that can help make our
life easier by trying to automatically determine sensor-dependent thresholds. For this
class, most of the decisions we're trying to make from the sensors are binary, e.g.
are we on the line or off or is the light shining on the flashlight or not. What we'd like
to do is take a number of measurements from each sensor in the settings that we will
be using it in, record the values and then based on the values we see learn sensor-specific
and situation-specific thresholds for making the decisions we need to make. This is
called "sensor calibration"
Writing Your Own Sensor Calibration Code
For the IR sensors it would be very nice if the "black" threshold could be set automatically
for both sensors and when we move to a different setting. For this lab, we're going
to write code to do this.
As many of you noted, a good blackness threshold for the IR sensors was right in the middle
of the readings the sensors saw on the tape and the readings off of the tape. Write code
that automatically determines this value. Specifically:
When you're done, print out this code and submit it at the beginning of class on Wednesday.
- Write a procedure called calibrate that takes 300 sample readings from the two
IR sensors. The procedure should keep track in "global" variables the min and max values
read during these 300 sample readings for each sensor. After taking all of these readings, it should beep
to indicate that it's done then save in global variable the average of the min and max for each sensor. Finally,
the procedures should print out the min, max and average of the left sensor on the top line and the values for the right
sensor on the bottom line.
- Load your calibrate procedure onto the Handy board and the run it. As you run it, move the sensors
around both on and off tape (if you took your sciborg apart, just build a simple mechanism that keeps the sensors
at the right height and allows you to move them on and off tape). After you run it, check the min, max and average
values and make sure they make sense by comparing values that you see from the sensor display menu on the Handy boards. Run it
a second time and make sure you get similar values.
- Write two procedures called sees-blackL? and sees-blackR? that determine if
the left and right sensor is over black respectively based on your learned thresholds. These should return/output whether or
not the respective sensor sees black.
Although we wrote this code for the IR sensors and for checking for black you could use similar
code to calibrate your light sensors as well.
|
OPCFW_CODE
|
On Monday, April 7th, the OpenSSL team reported the Heartbleed bug to the world. OpenSSL is the software that is used to encrypt or hide the data coming from your computer to the website. It is widely used; any url that starts with https:// uses OpenSSL as a means to encrypt the data in transit. The Heartbleed bug allows an attacker to grab private data from a server which allows him to decrypt the private data that OpenSSL is hiding. It is estimated that two-thirds of the internet can be exploited using the Heartbleed bug.
Each website using OpenSSL uses a specific code to encrypt their data, called a private key. This code is used by the server to decode the data so that the server can communicate with your browser. This code must remain a secret for the data to be secure, anybody who knows the private key can decrypt the data. To exploit the Heartbleed bug the attacker sends a specific command to the server. The server then replies with a memory dump of 64 random kilobytes of memory. If the hacker did not find what they wanted in that section of memory, they can exploit the bug again indefinitely until they get the data they want. Using this method they will eventually get the private key.
The bug was discovered by a team of security engineers (Riku, Antti and Matti) at Codenomicon and Neel Mehta of Google Security, who first reported it to the OpenSSL team. The OpenSSL team then proceeded to carefully alert the major companies who use SSL to encrypt their data. Although the exploit was reported on Monday, it was discovered earlier. The problem with an exploit this widespread and this serious is that alerting the public right away will alert the hackers as well as the server admins. This leaves a dangerous time period between hackers knowing about the bug and server admins fixing the bug where any server can be hacked, and private data from users leaked. Because hackers typically respond to new exploits much faster than server admins can, many sites will be hacked if OpenSSL releases this information to the public right away. The team first released the information to sites like Facebook and Google, but Amazon and Yahoo were not notified until Monday. Even some branches of Google were not notified. The more people know about the exploit, the higher chance the exploit will be leaked prematurely, allowing hackers to attack servers that have not had the chance to patch the bug yet. Alerting service providers before the general public is a very common tactic on zero day exploits (exploits that are discovered by security researchers and may or may not be exploited by hackers). OpenSSL was forced to release the information when they got a message from a Finnish cybersecurity firm warning them that the secret was out. Because of this early release, some companies like Amazon and Yahoo who would have been notified were left scrambling to release server updates.
Very rarely is there a bug as serious and as widespread as the Heartbleed bug. Unfortunately, the Heartbleed bug has been in existence since New Year’s Eve 2011, so it is very possible that your data could already be compromised. If you haven’t done so already, you should change your passwords on every site. It is also recommended that you use two-factor authentication instead of only your password, because hackers would not be able to access your account with only your password.
Voice your opinions
|
OPCFW_CODE
|
VBScriptEngine with message queues and multi threaded environment
First of all thank you so much for this project. I am trying to use this for a solution in which I am consuming messages from a message queue using masstransit service bus consumers. The number of messages read from the queue can change depending on the number of processors. So for my example, 8 messages are read from the queue at a time on a server spawing 8 threads.
My VBScriptEngine code is pretty simple. The consumer code basically does this..
using (var engine = new VBScriptEngine())
{
engine.AddHostObject("a", obj1);
engine.AddHostObject("b", obj2);
engine.AddHostObject("c", obj3);
engine.Execute(scriptText);
engine.Invoke("StartProcessing");
}
This works well and I am able to get my results from the VB script processing. However, in a performance test where I threw a huge bunch of messages into the queue, (around 3000 messages) about 10 of them failed with an exception message "Not enough storage is available to process this command"
The event log shows this is happening during a finalize call and I don't understand why. I have the code wrapped in a using statement and I see the ScriptEngine base class implements IDisposable. Here's the event log message
Framework Version: v4.0.30319
Description: The process was terminated due to an unhandled exception.
Exception Info: System.NullReferenceException
at Microsoft.ClearScript.Windows.WindowsScriptEngine.Dispose(Boolean)
at Microsoft.ClearScript.ScriptEngine.Finalize()
Fault bucket , type 0
Event Name: CLR20r3
Response: Not available
Cab Id: 0
Problem signature:
P1: MyTestProj.exe
P2: <IP_ADDRESS>
P3: 5d715d65
P4: ClearScript
P5: <IP_ADDRESS>
P6: 5d41cad2
P7: 4a6
P8: 0
P9: System.NullReferenceException
P10:
I would appreciate any help in resolving this as I am stumped by it.
Another comment - I can run my queues up to about 50 minutes to be able to reproduce this error. In my log files where I capture more exception details, I have seen this.
System.ComponentModel.Win32Exception (0x80004005): Not enough storage is available to process this command
at MS.Win32.UnsafeNativeMethods.CreateWindowEx(Int32 dwExStyle, String lpszClassName, String lpszWindowName, Int32 style, Int32 x, Int32 y, Int32 width, Int32 height, HandleRef hWndParent, HandleRef hMenu, HandleRef hInst, Object pvParam)
at MS.Win32.HwndWrapper..ctor(Int32 classStyle, Int32 style, Int32 exStyle, Int32 x, Int32 y, Int32 width, Int32 height, String name, IntPtr parent, HwndWrapperHook[] hooks)
at System.Windows.Threading.Dispatcher..ctor()
at System.Windows.Threading.Dispatcher.get_CurrentDispatcher()
at Microsoft.ClearScript.Windows.WindowsScriptEngine..ctor(String progID, String name, String fileNameExtensions, WindowsScriptEngineFlags flags)
at Microsoft.ClearScript.Windows.VBScriptEngine..ctor(String progID, String name, String fileNameExtensions, WindowsScriptEngineFlags flags)
at Microsoft.ClearScript.Windows.VBScriptEngine..ctor(String name)
Hi @sturdy60,
Unfortunately the 0x80004005 result code is unhelpful, as it's just E_FAIL, an unspecified error. Would it be possible to log the exception's NativeErrorCode property?
Thanks!
Got that code changed. I will run it again and get it to you in an hour or less
It's an 8.
ERROR_NOT_ENOUGH_MEMORY
8 (0x8)
Not enough storage is available to process this command.
Hi @sturdy60,
To comply with the threading requirements of the underlying Windows Script engine, ClearScript associates each managed instance with a dispatcher, a thread-level object whose implementation is apparently based on a classic Win32 window.
The stack indicates that you're instantiating VBScriptEngine on a thread with no existing dispatcher, so a new one was being set up on the thread when window creation failed. The error code usually means that there were too many windows or other UI resources attached to the current desktop.
Does your application do anything obvious that might lead to this situation? For example, are you using VBScript on a very large number of threads?
Thanks!
Thank you! That made me look a little harder in the code. Two layers above in the stack, where we pull from the message queue into the consumer using masstransit, we have about 8-12 threads working at a time. Each of those threads was creating a new task for each item in the message and those weren't getting disposed. I was focused on the script engine because the event log was around it. Properly disposing the other objects took care of this issue. You can close this issue and thanks for your time!
|
GITHUB_ARCHIVE
|
Basic redirect using mod_rewrite
I want to have a redirect using mod_rewrite.
When someone opens http://aswt.mobileworldindia.com
then they will be automatically redirected to http://www.mobileworldindia.com/panels/mobile/index.php?subdomain=aswt
Please note that http://aswt.mobileworldindia.com is not an actual subdomain and doesn't exist at all and neither I want to have it. I just want to have a redirect in my site.
Tried following code but didnt work, can anybody tell me where I am wrong.
RewriteEngine On
RewriteBase /
RewriteCond %{HTTP_HOST} ^(.+)\.sellingsellingsold\.co.uk$
RewriteRule ^$ /index.php?subdomain=%1 [L,R=301]
This is uploaded at sellingsellingsold's root and I am trying to open http://aswt.sellingsellingsold.co.uk.
Please help me in getting it sorted. It has been 2 weeks since I am trying to achieve the redirection.
This has nothing to do with php. Rewriting is done by a module loaded inside the http server. I suggest you read the excellent manual. Then have a try, if you do not succeed post your attempts here. http://httpd.apache.org/docs/current/mod/mod_rewrite.html
I want to point out that re-writing is different from redirecting. Re-writes simply serve a different URL to the one displayed, and redirect will take you to a different URL entirely.
Something like this inside the main server configuration or inside a .htaccess file. Using the server configuration is preferred and more reliable.
RewriteEngine On
RewriteCond %{HTTP_HOST} ^(aswt)\.mobileworldindia\.com$
RewriteRule ^$ /panels/mobile/index.php?subdomain=%1 [L,R=301]
If you want to redirect from any possible subdomain instead of just "aswt." use (.+) instead of (aswt).
Out of curiosity, why is it more preferred to use the server config? I'd never heard that before. Surely it's less reliable since .htaccess overwrites it.
.htaccess files can only overwrite it if it is explicitly allowed (enabled) in the central server configuration. That is where the behaviour is controlled. Apart from that the syntax is slightly different (easier in some cases) and less confusing, since you have the rewrite rules in one place instead of having them spread over different files you have to consider. .htaccess files ofter get deleted by accident or forgotten when changing things. And the most important thing: you can use rewrite logging only from within the central server configuration.
@arkascha - This is not working my friend. I have tested it. Can you give me updated modrewrite code where aswt is hardcoded and remain fixed
Sorry, obviously I did not test that code in detail, I wrote it from experience. There might be some glitch in there, though I fail to see the problem right now. I suggest you turn on rewrite logging as I suggested. Set the log level to maybe 7 and monitor the logfile you configured whilst making a single request. The module will tell you in detail what is going on inside the rewrite engine. This really helps to pin down problems with rewriting.
@arkascha - When you say inside the server configuration file, where exactly is it?
can you help me how to enable rewrite log
Right before where things like DocumentRoot and Options are defined, so most likely inside the main configuration of your virtual host. Oh: and when you make changes to the main server configuration you have to restart the server process for those changes to be read in again. Something like /etc/init.d/apache2 graceful or the like.
About rewrite logging: I suggest you take a look at the excellent manual: http://httpd.apache.org/docs/current/mod/mod_rewrite.html It explains the two options you need in detail: RewriteLog and RewriteLogLevel. All you have to do is specify a logfile where to write to and a log level (I suggest "7"). Then watch that file using something like tail -f /path/to/logile whilst making a single request from the browser.
|
STACK_EXCHANGE
|
Support NixOS
It would be great if bob also worked on NixOS. I miss the quality of life of being able to quickly swap between nvim versions when debugging issues.
AppImages don't work on NixOS by default, since NixOS doesn't follow FHS.
However, NixOS does provide the command appimage-run, which can extract and/or run AppImage files on NixOS.
With this, adding support for NixOS sounds like it would be pretty easy:
Detect if user is on NixOS (/etc/os-release / /etc/lsb-release / uname -v?)
Check if appimage-run is available in PATH, otherwise use nix run nixpkgs#appimage-run -- ...
Extract the AppImage using this command:
$ appimage-run -x ~/.local/share/bob/<target-dir> <appimage>
Now, the extracted directory can be executed with this command:
$ appimage-run -w ~/.local/share/bob/<target-dir>
This can be written as a script to ~/.local/share/bob/nvim-bin/nvim, so the user can launch nvim transparently:
#!/usr/bin/env bash
RUN="${$(command -v appimage-run):-"nix run nixpkgs#appimage-run --"}"
exec $RUN -w $HOME/.local/share/bob/v0.9.4/
I guess it's time to rollback to using tar.gz neovim provided, glad they didn't stop supporting it.
I have maybe the same issue, when doing bob install nightly I get:
Downloaded version nightly to /home/mmayer/.local/share/bob/nightly.appimage
[00:00:00] [█████████████████████████████████████████████████████████████████████████████] 17.35MiB/17.35MiB (953.67MiB/s, 0s)
Feb 15 08:55:32.233 ERROR Error: No such file or directory (os error 2)
But I'm not on NixOS but using nix package manager/home-manager on Ubuntu so I can't set nix-ld.enable=true. Any other known workaround for this, maybe @Hubro ?
I have maybe the same issue, when doing bob install nightly (stable works fine) I get:
Downloaded version nightly to /home/mmayer/.local/share/bob/nightly.appimage
[00:00:00] [█████████████████████████████████████████████████████████████████████████████] 17.35MiB/17.35MiB (953.67MiB/s, 0s)
Feb 15 08:55:32.233 ERROR Error: No such file or directory (os error 2)
But I'm not on NixOS but using nix package manager/home-manager on Ubuntu so I can't set nix-ld.enable=true. Any other known workaround for this, maybe @Hubro ?
If you're on Ubuntu then this definitely isn't the same issue. Ubuntu follows FHS and basic dynamic links should work.
From the error message, it could be an issue with dynamic links, a missing binary that Bob requires or any number of other things.
Are there any log files with more information, or is it possible to enable debug logging or verbose output?
|
GITHUB_ARCHIVE
|
In what cases should I completely state a theorem I'm about to use?
This is the question. The theorem is taken from someplace else.
In my case, I want to use the asymmetric case of local lovasz lemma , and I am not sure if I should completely state it, or just refer the reader to (lemma 5.1.1 on pg ..).
If the answer is yes, the next question is where. Specifically, in the middle of the proof, it is strange to have all of a sudden another lemma. So I guess I should state it before the start of the lemma (should I write: I am about to use this lemma?).
Thanks.
Consider having an appendix/annex to the thesis where you state things that are essential to your argument and which your reader may want to see without looking elsewhere. For some things, perhaps in this case as well, a footnote citation of the lemma, including its name, might be enough since most people working in probability will know of it by name. Then, as a step in proving the current lemma/theorem, just refer to the footnote or appendix entry. The reader can take a detour if they feel it necessary.
But, you are correct not to make it hard to follow the flow of a proof.
A full statement of such a thing might be necessary if you were proving a variation or an extension of that thing. And in that case it might be necessary to work the statement and even, perhaps, a proof outline of the original into the text itself so it is clear how you are extending/modifying.
This advice seems oddly specific to theses. Such an appendix would be rather unusual in a paper.
@AndrésE.Caicedo, yes, agreed in general, but there might be exceptions. See the tag on the question, however.
Yes, I noticed the tag. What still gives me pause, though, is that the same question will then reappear in a few months, when the person asking is now trying to write papers. In any case, this is one of those things that advisors may want to help novices with.
For my taste, I'd "recall" the precise statement exactly when you need it, in the internals of your proof, as you seem to indicate. I myself have become ever fonder of a math writing style which does not require so much flipping back-and-forth to understand what's being said. (Especially the otherwise-precise quasi-Bourbaki of referring to things by some (necessarily artificial and meaningless) numbering scheme, rather than any sort of descriptive reference.)
In particular, for simply the statement (rather than proof) of a result, adding an appendix would make things harder to read for many people, and the people who already know the result would not gain much. Skipping over known things is easier than flipping back-and-forth.
That is, allowing your readers to read straight through seems to me the ideal. So, no, similarly, don't introduce all the notation at the beginning and then expect people to remember it. Sure, you could have an appendix for reference for notation, but it really should be explained when first used, ... in my opinion. That kind of thing.
I like this suggestion. I would add that it may be reasonable to "announce" one would be using the result. Saying something like "the following lemma makes use of [4] Theorem blah, whose statement we recall in due course". A reason is that a reader wanting to understand every detail may decide to take a look at [4] before reading the lemma rather than finding themselves having to pause in the middle if it.
I think the Bourbakist reference style works fine with hyperlinks. To go even further, I wish HTML rather than PDF were the standard for mathematics papers, so that it would be possible to have mouseovers, collapsible text, and so forth.
PDF provides a lot of abilities. I am not sure it is not possible. mouseovers do exists.
|
STACK_EXCHANGE
|
Sql server enterprise manager.msc error you installed the
To the Visual Basic since W10 do NOT FULL!So I move windows diagnostics tool I use a program is displaying for me what i am manager.msf connection I would be PRICED higher TDP but they were others found it reappears.
The only way to automatically search for a Network Stored proc error Media file information from viewing my W7 itself, even boot order to proper help. I hope we got the last reports and now getting the rest. I use as: irlq_not_less_or_equal, System_service_exception, the windows now option from it is, to windows back and helping me.
WAN Miniport (PPPOE), WAN Miniport(Network Monitor), right-clicked on my SSD HDD and KBKB3126587, most annoying is the sql server enterprise manager.msc error thing, but sometimes when I tried the flow. I did not sure if this time. Useless, too small and upon this Software Distribution AdobeAdobe Flash Player required them in all available to wonder could cause many tries to a break it: 'Microsoft virtual disks and change 0F871CFDFFFF to the lengthy post.
I can do it i did not working. Whenever I want them,boot from ACPI Table Name OEMID and Disable Aero effects". It doesn't seem to plug a 2nd file. When the installation error 80073712 141015KB2999226Succeeded KB3083710FailedError Code Integrity Monitoring, detects sleep when it and Activation Technologies- HrOffline: 0x00000000 HrOnline: 0x00000000 Parameter 4: fffff80374dcb4c0 Caused By curiosity to fix to be helpful.
Much thanks in Unityi have often times when you still experiencing a red X to load after The only know if ever. when connected power settings to do draws power saving 6-Lost data file path: C:GamesSteamSteamAppscommonSquad_CommonRedistvcredist2015vc_redist. x64. Each of the left sql server enterprise manager.msc error anywhere to use the microsoft has been having trouble shooting this size of MBAE will go to pin and try to a few minutes it would you will you left ilitiesProcess Explorerprocexp.
exe")'cmd' launching the remove please read this or if there will be great for the Winy Key Drive no help me. What were running windows 7 with 4K Read: 1471 Write: 570 from their icons.
This unknown error 609 itunes these blank screen doesn't always recognized in reality, it doesn't seem to run the password protect me whatever software that scanner. If you contact system error 1083 has occurred.
windows update is that dates of their cells: Ctrl Alt Del, because I've looked at 10. After restarting the constant 90-120 fps (not disk) can resize C while running state, but eventually crash. Post a file is an extra slowly and an hour and then put a Name"SessionInitOtherDuration"1904Data Data Name"Address1"251766976Data Data Name"BootPNPInitDuration"485Data Data Name"Address"020000350AF0CDA10000000000000000DataEventData Event Viewer errors, I have tried to the machine's data corruption count:0 SURT and then a Windows License Type: errr - A MS gave them (always same as well get a microphone seems to Win 7 32 bit of Internet Explorer installed on this issue.
Neterprise help!!More Info: Sql statement error number 6372 results back. Anyone encountered problems enterrprise from an answer!Since UAC to "Trusted sites" security program I am hoping it either.
PLEASE let me out. not get HDTV. No errors when I had 9,87 GB DDR3-1600 Graphics card and OEMTableID Consistent: yes I shouldn't be able to sql server enterprise manager.msc error and then it never looked plenty of the current computer always appear at 30 seconds or is an OS Type neutral, PublicKey neutral in the changes in the wireless USB 3. 1 sata 2 Low free tools. The most visited many things.
I cleared but I take my desktop and Send to me from Windows firewall caught the same user profile, as you solve it. I want to check my external hard drive C). Double-click the HD(a 1 or bluetooth driver is probably need for the product activation, this screenshot where you tvtv server error to the Samsung EVO 240 dql down during the video is the Windows marker version: 6. 7601. 16492_none_dabb204d1164d720UIAnimation. dll failed to be GPT. MBR.
So, I ha Hello- Something went to see if Microsoft Corporation description: This makes it seems that will record the controller is trying to the pages. Please can i have replaced doing the wrong with Win 7,I don't mind to turn off the entry-point DllRegisterServer was able to try repairing and disable the task manager, allows me what to lose other Microsoft security update should get an easy way to use the first time. I keep my mouse cursor - however, I would put it better quality motion blur tearing my data, programs, one which I have any more.
The Board Product Key it again. I haven't quite a Dell Inc. Model of about how to diagnose this. I have permission from my HDD to a black screen with my mistake. I get sound do have Win 7 option to Windows 10 uniform error model in ns2 then I later (maybe thats what is playing games.
It said the temp program- which none of my window, can you don't look like prior to the upgrade. The answer can enetrprise can delete all the computer there is mentioned; Office Status: 109 NA BIOS (-) I tried many programs slightly different song or so, I can do anything. gerxoticpc for the internet and it never checked the cleaning it, and checked for drivers etc. Did this site on the unattendd. xml Read the BSOD?. I've been my needsAlthough I noticed the fixes (including this way.
The crash dump file: TeeDriverx64. sysPlease let me from a simple task scheduler service and enter;rise any hidden icons.
db, and when i fix the preview manager.sc HP Phoenix Windows Help Community My other threads and sql server enterprise manager.msc error a piece of her data drive icon has the program to add it took enterpriss 700 MB but I have to these steps: NoteTo remove Daemon tools, Alcohol 120 GB SSD dies.
I'm a Samsung Data Recovery Environment. When I make you find, if these failed to use numerous tools, Alcohol 120 GB of Quicken to change on installing all the recent thread saysasks it to finally found the PLII Music or programs might have been really appreciate every now I forgot about 10-25 of Windows it ran on there called 'Local Area Connection:Connection-specific DNS client an extra task bar program or why does not let us to chat with it.
Still BSODed by me, guys. Been running Windows 10 but now and tried most likely the same thing I upgraded to use as it was at all. Does anyone know how to install Windows 7 or while overclocking anything to set the other words and Google Maps, but the start Application.
ShellExposure. ParseAppShortcut(String shortcutFile, DefinitionIdentity subId, Uri providerUri)at System.
Majager.msc. Process. Could not show up and the second HDD OS and tried to mention, every day 4 hours of this forum.
I presume all of hard drive, a rescue swl somehow get your tutorial, but are on 'more' in use hdmi port. " and t : 0000000000000000 0000000000000000 My question is. ain, and the need to IE11 offline sfc but I don't use a default, WPA2-PSK (AES).
I then are the email will share one is the replace it, after weeks, the card that every bootup. It is mirrored). I press play a Windows OS are for now it says "configuring computer" enterpriee this thread: Autorun. inf file from the option to turn on it) or is fine three computers.
|
OPCFW_CODE
|
The PBP suggests that boolean variables and subroutines should get special naming consideration, and be named in ways that read well. That may involve giving them names like “is_whatever” or “has_whaterver” or “can_whatever”, so that they make contextual sense.
Archive for the ‘Perl’ Category
This section of the PBP is a long one, and goes into great detail about how to think about naming important parts of your programs. It covers explicitly modules/namespaces, variables (several types of them), and functions.
I thought, at first glance, it was way too picky and specific, but the more I read it, the more I liked it.
The Best Practice is to get a tool to do the work for you; it’ll be regular, and not spend your time doing it.
My only concern is that Mr. Conway says, “You can take ugly code like this and turn it into something readable.” where the “readable” code isn’t, to my mind, that much better. Spaces around the parens and an extra blank line do not readable code make. (more…)
Lists need to be formatted to be readable. The suggestion is to always use parenthesis, indent after a parenthesis, and line things in columns, all with trailing commas.
The book provides clear examples, which I won’t duplicate here. (more…)
The book talks about how confusing the ternary operator can be and how much of a mess it can make. I’m almost surprised it doesn’t say, “Don’t use it.”
It suggests columns instead, with the condition, then the positive result.
The PBP suggests breaking assignments much like it suggests breaking other operators; the assignment leads the broken line:
my $thingy = $stuff + $hard_things + $foo;
This Practice is to break long expressions at lower-possible operators. It actually says “the lowest possible precedence”. At a glance, this sounds like a fine idea, and it, in general, is. The reason given is pretty solid, that you can easily confuse people about precedence by splitting things apart. This will be even more important on operators people may not be familiar with the precedence of. (Quiz: Which is higher precedence “&&” or “and”? Which is higher precedence, “~” or “^”?)
I’ve been watching the YAPC::NA presentations, and saw a mention of this Kickstarter:
This is a project to help get a system written in Perl to compete with WordPress. There is an existing system that’s close, all it needs is time, energy, and developers. It sounds like there’s a great core there, but it’s lost the commercial support it had before, and the one developer can’t go it alone.
The Kickstarter is to try and fund some key work on it, and make it more accessible and as easy to get started as WordPress.
Sadly, the Kickstarter’s getting close to no traction, and that makes it look kind of bleak.
Here’s a signal boost; have a look at the system, read the page (the video has lots of good stuff, but it’s a little wordy) and consider supporting this useful development.
If you can’t support the Kickstarter, maybe you have time to help them get the major new release unblocked and keep it from sinking into the murk of obscurity.
At the end of the video, they point out that a hundred people giving a hundred dollars each will fund it, and get it over some of these hurdles. A thousand people giving ten dollars each would work just as well.
This Best Practice is one that I’ve seen people argue horribly about, and I don’t get it. The suggestion is to avoid doing a big calculation, with sequences of complex operations, in the middle of a statement. Take that complexity, stick it in a variable, and use the variable. (more…)
|
OPCFW_CODE
|
ping pong table dimensions what are the of a standard size regulation inches.
ping pong table sizes dimensions 1 appealing official size interior inches clearance standard dimen.
what size is a ping pong table butterfly junior 3 4 standard regulation room.
since a ping pong table size is 9 long by 5 wide we recommend minimum of behind each end the and 3 to side standard in inches room.
what size is a ping pong table official standard length conc.
standard ping pong table dimensions room size for regulation length.
ping pong table for sale custom regulation size tennis top standard inches.
table tennis size standard ping pong in inches dimensions medium image for revolution dimen.
ping pong table sizes portable what size is a regulation standard length pro s.
ping pong table height dimensions size cheap tables standard length dimen.
mid size ping pong tables table what are the dimensions of a standard in inches wh.
standard ping pong table size dimensions inches elite tennis in.
ping pong table measurements sizes tennis size hire standard dimensions room pi.
what are the dimensions of a ping pong table official size standard t.
official ping pong table size standard dimensions co length in inches offic.
ping pong table size paddle dimensions paddles standard.
standard ping pong table dimensions regulation size midsize tennis room dim.
standard size table tennis product ping pong dimensions room tiger chart.
standard ping pong table size professional tennis match length dimensions room professio.
standard size ping pong table tennis board for in inches tabl.
regulation ping pong table dimensions size and clearance standard pi.
regulation size ping pong table standard blue dimensions room re.
table tennis size dimensions ping pong tables top standard free rackets net.
ping pong table sizes size dimensions what are the standard in inches pro.
regulation ping pong table size dimensions of a top standard in inches pin.
regulation ping pong table size room dimensions what is the official standard in inches si.
size ping pong table dimensions height of measurements regulation length fantastic inches standard fl.
excellent ping pong table size full dimensions standard regulation room dimensio.
ping pong table tennis official size standard folding portable outdoor game set regulation inches.
room size for ping pong table of tables tennis a separate international standard dimensions pin.
the standard size of a ping pong table measures 9 long by 5 wide so your room needs to be at least this if just playing solo with.
size of a ping pong table tennis dimensions standard length.
table tennis dimensions ping pong size in mm standard t.
height of ping pong table midsize tennis dimensions inches standard size room.
ping pong table size sizes dimensions inches standard in.
ping pong table dimensions inches height standard size regulation room.
elegant folding ping pong table wanted old regulation size standard dimensions room.
official ping pong table size photo 7 of standard in inches regulation room.
mid size ping pong table height of standard in inches tabl.
different manufacturers standard ping pong table size regulation room what is the official of a.
dimensions of a ping pong table inches standard size length.
ping pong tables also standard table size regulation room what is the official of a.
room size for ping pong table tennis tables sale standard regulation inches.
Tagged with ping pong table dimensions room size regulation ping pong table room size regulation ping pong table size inches standard ping pong table length standard ping pong table size in inches standard size ping pong table dimensions
|
OPCFW_CODE
|
Azure Python API to list recovery service vault
Is there any API in python to list out recovery service vault name
Hi @indumathyn
We do not have the SDK right now, but the service team made the meta-description of the RestAPI that is the input for generating a Python SDK. I contacted the service team and I'm waiting for the approval on this meta description to be able to propose a preview version of the package in Python.
Thanks!
Hi @indumathyn
The package was just released as preview:
https://pypi.python.org/pypi/azure-mgmt-recoveryservices
So I close this issue, since we were talking about availability. But do not hesitate to create issue if you encounter any issue using it.
Thank you,
Am using this api to create vault,
recovery_client = RecoveryServicesClient(credentials, subscription_id)
recovery_client.vaults.create_or_update(resrcgrp,vaultname,vault)
Dono what input to give for vault
Hi @indumathyn
Please read the documentation:
https://docs.microsoft.com/en-us/python/api/azure-mgmt-recoveryservices/azure.mgmt.recoveryservices.recoveryservicesclient?view=azure-python
Unittest might help:
https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-recoveryservices/tests
Already am using that api only...i need that last arguments what need to
give only...
My actual task will be to fetch api to take backup for vm like recovery
vault job items service...for that dono what are the steps have to be
follow....please guide me...
Regards,
Indumathy N.
On Wed, 25 Jul 2018, 2:05 am Laurent Mazuel<EMAIL_ADDRESS>wrote:
Hi @indumathyn https://github.com/indumathyn
Please read the documentation:
https://docs.microsoft.com/en-us/python/api/azure-mgmt-recoveryservices/azure.mgmt.recoveryservices.recoveryservicesclient?view=azure-python
Unittest might help:
https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-recoveryservices/tests
—
You are receiving this because you were mentioned.
Reply to this email directly, view it on GitHub
https://github.com/Azure/azure-sdk-for-python/issues/1264#issuecomment-407543089,
or mute the thread
https://github.com/notifications/unsubscribe-auth/AFyIxL0g42B6iRApTMlyOSj8vFMrdFKuks5uJ4ULgaJpZM4OHmM-
.
From the link I sent to you, clicking on "vaults" and then "create_or_update" gives you a link to Vault definition:
https://docs.microsoft.com/en-us/python/api/azure-mgmt-recoveryservices/azure.mgmt.recoveryservices.models.vault?view=azure-python
I don't have details, since I don't manage this SDK. Morever, you commented on a closed issue, which makes our discussion off-topic :/
Thanks
Already am using that api only...i need that last arguments what need to give only... My actual task will be to fetch api to take backup for vm like recovery vault job items service...for that dono what are the steps have to be follow....please guide me... Regards, Indumathy N.
…
On Wed, 25 Jul 2018, 2:05 am Laurent Mazuel, @.***> wrote: Hi @indumathyn https://github.com/indumathyn Please read the documentation: https://docs.microsoft.com/en-us/python/api/azure-mgmt-recoveryservices/azure.mgmt.recoveryservices.recoveryservicesclient?view=azure-python Unittest might help: https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-recoveryservices/tests — You are receiving this because you were mentioned. Reply to this email directly, view it on GitHub <#1264 (comment)>, or mute the thread https://github.com/notifications/unsubscribe-auth/AFyIxL0g42B6iRApTMlyOSj8vFMrdFKuks5uJ4ULgaJpZM4OHmM- .
Already am using that api only...i need that last arguments what need to give only... My actual task will be to fetch api to take backup for vm like recovery vault job items service...for that dono what are the steps have to be follow....please guide me... Regards, Indumathy N.
…
On Wed, 25 Jul 2018, 2:05 am Laurent Mazuel, @.***> wrote: Hi @indumathyn https://github.com/indumathyn Please read the documentation: https://docs.microsoft.com/en-us/python/api/azure-mgmt-recoveryservices/azure.mgmt.recoveryservices.recoveryservicesclient?view=azure-python Unittest might help: https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-recoveryservices/tests
Hello @indumathyn ,
I was trying to achieve a similar task to get RSV infos. Could you find any useful answer regarding " what to give for 'VAULT' parameter?" issue?
Any guidance would be appreciated.
Thanks a lot :))
Hello @indumathyn , I was trying to achieve a similar task to get RSV infos. Were you able to find any useful answer regarding " what to give for 'VAULT' parameter?" issue? Any guidance would be appreciated. Thanks a lot :))
@lmazuel , @indumathyn
I figured that 'VAULT' variable is the name of the RSV resource. But I don't want to get only the specific RSV infos but all the RSV resources under a given resource group.
Any ideas on How to achieve this task?
|
GITHUB_ARCHIVE
|
dotnet add package UniversalWPF --version 0.9.0
NuGet\Install-Package UniversalWPF -Version 0.9.0
<PackageReference Include="UniversalWPF" Version="0.9.0" />
paket add UniversalWPF --version 0.9.0
#r "nuget: UniversalWPF, 0.9.0"
// Install UniversalWPF as a Cake Addin #addin nuget:?package=UniversalWPF&version=0.9.0 // Install UniversalWPF as a Cake Tool #tool nuget:?package=UniversalWPF&version=0.9.0
A set of WPF Controls ported from Windows Universal
NOTE: This is mostly a work in progress. TwoPaneView is fully working, but might have issues on Windows X, as the necessary APIs there to do screen spanning are not yet exposed. State Triggers are not working yet. RelativePanel should work but needs lots of testing (please help!), SplitView is partially working, but needs some work still.
If you like this library and use it a lot, consider sponsoring me. Anything helps and encourages me to keep going.
See here for details: https://github.com/sponsors/dotMorten
- TwoPaneView - A full port of UWP's TwoPaneView control, including support the Windows X Dual-screen devices.
- NumberBox - A full port of UWP's NumberBox control.
- RelativePanel - Fully implemented but needs more testing (please help and report any rendering differences between this and Universal's RelativePanel). Want to see this built-in in WPF? Vote here
SplitView (Very much work in progress - doesn't animate in/out and closed compact mode isn't rendering)
StateTrigger / AdaptiveTrigger (API complete, functionality not so much)
|.NET||net5.0 net5.0-windows net6.0 net6.0-android net6.0-ios net6.0-maccatalyst net6.0-macos net6.0-tvos net6.0-windows net7.0 net7.0-android net7.0-ios net7.0-maccatalyst net7.0-macos net7.0-tvos net7.0-windows|
|.NET Core||netcoreapp3.0 netcoreapp3.1|
|.NET Framework||net452 net46 net461 net462 net463 net47 net471 net472 net48|
- No dependencies.
- No dependencies.
This package is not used by any NuGet packages.
This package is not used by any popular GitHub repositories.
|
OPCFW_CODE
|
Database replication is a special instance of data integration, and consists of extracting and loading data from your databases to your data warehouse. Once the data is in a central repository, you can model and analyze the data to produce actionable insights into your operations.
However, building a data pipeline to move data from operational databases to a data warehouse is a highly involved process that includes a wide range of non-obvious technical challenges to solve and features to build, including:
- Incremental updates
- Schema updates
- History mode
- Data type mapping
- Replication and sync monitoring
- Loading optimization
- Support and assistance
Why database replication is complicated
If your data pipeline does not adequately address the challenges or features listed above, then your data operations will suffer poor performance, serious data integrity problems and demoralized contributors. Here are the reasons why.
Your data pipeline shouldn’t make a full copy of each of your data sources every time you update your destination. While a full synchronization is necessary the first time you ever sync your data, doing it routinely will:
- Increase the duration of each sync, making it impossible to provide quick or real-time updates
- Bog down both the source database and destination data warehouse, using resources otherwise needed for operations and analytics, respectively
- Consume excessive network bandwidth
These problems grow in scale as you add more data sources and the overall volume of your data increases.
The solution is to only sync data that has been updated, rather than entire data sets, i.e., to perform incremental updates. In the case of database analytics, this usually means querying changelogs that contain a full history of updates, new records and deleted records.
Idempotence means that if you execute an operation multiple times, the result will not change after the initial execution. A good example is an elevator console — pushing the button for a particular floor multiple times will achieve the same result as pushing it once.
In terms of data pipelines, idempotence prevents the creation of duplicate records when data syncs fail and must be repeated.
Without idempotence, data syncs that have failed and must be repeated will produce duplicate or conflicting data. This will produce the following problems:
- Misleading or erroneous insights – counts, rank orders, sums, averages and other metrics will be thrown off
- Broken operations – especially if they rely on one-to-one correspondence between records and identifiers
- Wasted storage space and computational bandwidth – you will end up storing a larger volume of data
Data syncs can fail as a result of outages or bugs at the source, i.e. operational databases, within the data pipeline, and at the destination.
Your sources will evolve over time, adding, removing, or modifying fields or tables. Your data pipeline must reconcile changes between the source and destination without destroying old data.
There are several solutions to the different instances of this problem, such as soft deletes, in which deprecated fields or tables are flagged rather than removed. But what about database fields whose values simply change over time?
The ability to track changes to metrics over time is essential to learning from your organization’s past, specifically identifying trends such as changes in usage and the status and growth of accounts over time. If a particular field changes at the source, the old value is lost forever if you don’t keep track of the change. For tables where retrospective analysis might be essential, you will need to find some way to track every version of every record and revisit past versions.
Data type mapping
Not all of the data types between sources and destinations will be compatible. You must create a hierarchy of data types and assign the minimum appropriate size in the destination for each field in the source. Over one field and a few hundred or thousand rows, the difference between 16 bytes or 8 bytes per record won’t mean much in terms of storage or memory use. However, these problems add up over multiple fields and millions of rows.
Replication and sync monitoring
Your data pipeline should allow you to determine the freshness of your data by observing when it was last synced and the status of that sync. It should also allow you to set an interval for syncing. Without some kind of graphical user interface, you will be wholly dependent on your engineers to keep track of this information.
There are many ways for your data pipeline to be bottlenecked. If your data pipeline runs on cloud infrastructure (and it should), you will need to optimize the performance of your pipeline, queries, data warehouses and databases as your data traverses various networks. Ultimately, you will have to determine what cost and performance profile fits your needs and tune your system accordingly.
On the topic of security, there are many considerations in addition to the network traffic concerns listed above. Data storage, access control, column blocking and hashing, data governance, SAML access, regulatory compliance, and more all deserve attention.
Support and assistance
No technology is infallible. When your data pipeline inevitably breaks down, your analysts will require the attention of engineers and other specialists for maintenance and support.
Your data needs will inevitably grow in scale as your use of data matures. You will need access to more hardware and resolve larger, more complicated versions of every problem listed above.
Support database analytics with automated data integration
Extracting, loading and transforming data from databases should be painless and involve a minimum of human intervention. A homespun solution to data integration will, even in the best of cases, be painful and highly labor-intensive.
At Fivetran, we offer an automated, off-the-shelf solution to data integration. Consider a free trial or a demo, and see for yourself how you can circumvent all of the complications and challenges inherent in integrating and analyzing database data.
|
OPCFW_CODE
|
Suggestion: Trackers through arguments
This is a cool and fun idea indeed. Thanks for this!
I'd like to suggest an alternative API that maybe would make more explicit what $ and _ do, and make the implementation slightly easier.
Instead of importing $ and _ from the package, what if these two trackers could be passed as arguments to run? So instead of:
import { run, $, _ } from 'rxjs-autorun';
const a = new BehaviorSubject('#');
const b = new BehaviorSubject(1);
const c = run(() => _(a) + $(b));
You could:
import { run } from 'rxjs-autorun';
const a = new BehaviorSubject('#');
const b = new BehaviorSubject(1);
const c = run((track, read) => read(a) + track(b));
This would also let the consumer name $ and _ as they want (in this case, track and read respectively), but they could also go with a smaller t and r.
Hey, Víctor! You've actively participated in this, so big thanks goes to you 🙂 🙌
Yeah! Actually this was my initial API: run($ => $(o)) and I think it was nice and conscious (user wouldn't try to call those functions outside run). Though particular use-case for Ryan, as I understood, required those track/read functions to be available outside the run for integration purposes.
And it makes sense if you want to integrate this with, say, a custom state Observable, e.g.:
// pseudocode
// state.js
import { $, _ } from 'rxjs-autorun';
export const State = v => {
const subj = new BehaviorSubject(v);
return Object.assign(subj, { get _: _.bind(null, subj), get $: $.bind(null, subj) });
// not sure it'l work, but you got the idea
}
// index.js
import { run } from 'rxjs-autorun';
import { State } from './state';
const state = State(0);
run(() => state.$ + ' 🐑'); // > 0 🐑
state.next(1); // > 1 🐑
Which wouldn't be possible with run($ => $(o)).
But maybe we could have both options available for the user? Would that be fine or confusing?
Ahh, that makes sense, thank you for explaining.
But maybe we could have both options available for the user? Would that be fine or confusing?
Although I guess it wouldn't hurt (as users can just ignore one of both ways), it's not really needed. If anyone needs to use the suggested overload, it's fairly trivial to write a util function that exposes that API.
I'll close this issue - feel free to reopen it if you think it's worth keeping it in.
I think, you're right that it's a good alternative and it wouldn't hurt
Let's keep this issue open, maybe actual use-cases (ha-ha!) will advice us how to act 🤔
Truly, it's a one-line fix 🙂
Also, I guess, there's a side-point that $ and _ are not the best names for those functions, will create a separate issue to rename/add aliases.
I like to add my 2ct here as well.
It could be argued that another downside of passing $ and _ as arguments of the callback function could be that when you do a nested run, $ and _ would be shadowed, and the compiler would warn you. But I consider this a minor thing because:
I think nested runs are not gonna be used that often because I expect that would lead to awful expressions...
You can easily mitigate that by using different names in the nested run (e.g. $2 and _2)
But an upside is that you don't have to import $ and _. $ is still used for jquery a lot, and _ is often by convention used as unused argument.
So I would very much argue in favor of passing $ and _ as arguments as well.
Follow up to this discussion, I would also like to argue in favor of $ and _ being passed as arguments:
In case of provided example I am a bit confused as to the benefit here, since $ => $(state) + ' 🐑' would achieve the same thing. So I am particularly unsure of the general situation that such integrations would be useful and how they would become a serious hurdle if $ and _ were passed as arguments.
As mentioned on the reddit discussion, I believe not having an import side-effect (i.e. the context which is the basis of global $ and _) is much more tree-shaker friendly. I am unsure of current state of tree-shakers and how well they can handle Ok side-effects such as this.
Ergonomically, the ability to seamlessly rename would also be pretty cool. Imagine this:
() => `${${a}} 🐑`
vs this:
_ => `${_(a)} 🐑`
(although the best option might be an added feature like this):
() => rx`${a} 🐑`
I'm looking forward to trying some integrations (when I have time) to see what are the limits of the two approaches. E.g. I want to try autorun with rxjs-proxify or a simple state based on it. Unlike pipe(pluck('prop')), both have stable access to subproperties. Maybe something like:
const state = new State({ a: 1, b: { c: 2 } });
computed(() => state.a._ + state.b.c.$); // > 3
state.b.next({ c: 3 }); // > 4
Also, a.$ + b._ notation to me looks a bit less noisy than $(a) + _(b).
--
() => rx${a} 🐑
Nice! Maybe it's a candidate for an API. I also wonder if can we achieve this via external code now and would we be able to do that with $_ as arguments.
Btw, @voliva did an interesting string template-based approach
--
Now, back to the issue: currently, I see this change as not critical (unless it forbids tree shaking)
And therefore would like to do more research on integrations and explore the API (#3), as those might give us a better understanding of this feature
Please, keep sharing your thoughts and findings!
Also, if you still consider this to be a critical issue — feel free to elaborate on this
Cheers
So, I've been typing-thinking. Guess, if we pass params as args, we can still achieve the separation:
Warning: pseudocodish
// shared.ts
import * as rxjsAutorun from 'rxjs-autorun';
export function unwrapComputed(){
const c = { _(){ throw 'ERR'; }, $(){ throw 'ERR'; } };
return [ o => c.$(o)
, o => c._(o)
, fn => rxjsAutorun.computed(($,_) => c.$ = $, c._ = _, fn())
]
}
So that we can use it in 3rd party API:
// state.ts
import { unwrapComputed } from './shared';
const [$,_, computed] = unwrapComputed();
const State = v => {
const subj = new BehaviorSubject(v);
return Object.assign(subj, { get _: _.bind(null, subj), get $: $.bind(null, subj) });
// not sure it'l work, but you got the idea
}
export { State, computed }; // < NOTE exporting computed from state
And then:
// index.js
import { State, computed } from './state';
const state = State(0);
computed(() => state.$ + ' 🐑'); // > 0 🐑
state.next(1); // > 1 🐑
But I suspect that in this case we are locked to computed only from this API: in index.js computed from rxjs-autorun won't work. Neither would other 3rd party integrations if we have such.
Am I right in this assumption? What do you guys think?
How about providing the arguments to computed() alongside making them available globally? This way we would have some of the good parts of both worlds:
Easy renaming
Async invocation of $ and _
Still supporting the state.$ syntax (though with the added complexity that async invocation wouldn't work here)
This is definitely an option! I'm holding this because ($,_) => notation might limit us with alternative APIs we're exploring and this duality might confuse users. As we discussed, async invocation is doubtful, so I think we can deprioritize it.
Since the current approach works fine now, I'd like to postpone this decision until we got more practical cases.
Unless we discover it's critical, surely.
|
GITHUB_ARCHIVE
|
|< Building a portfolio: should you ship an app or put code on GitHub?
|How to do well at a Swift interview >
Recorded – watch the full episode on YouTube.
What are your thoughts on iOS developers needing a computer degree in order to succeed?
John Sundell: There are so many different paths. I can tell you that I firmly believe that you do not necessarily need to have a computer science degree, because I don't have one.
So I do not have a degree, in fact I have only been to a university once in my life, and that was to give a guest lecture. And I don't say that to brag or anything, and I don't want people to misunderstand me here to say that you don't need an education, or you should never take an education, absolutely not, that's not what I'm saying at all. I'm just saying again, that there are so many different paths that you can take to achieve your goals, and all of them have trade-offs. And if you wanted to play this as a drinking game, that's another word you can add to the list, "trade-off."
"I'm just saying again, that there are so many different paths that you can take to achieve your goals, and all of them have trade-offs."
So, when I didn't go to like a university to get a CS degree, the trade-off of that, for me, was that I had to learn everything myself. I had to learn how to write apps, learn how to code everything myself. And I've been coding since I was a kid, so it came a little bit natural to me in that way. My dad was a programmer, I became a programmer, the apple doesn't fall far from the tree, et cetera, et cetera. So, when I started getting really serious about going into development and becoming a full-time developer, for me the trade-off there, since I didn't have that formal education, was that I had to spend so much time learning things on my own.
And one really interesting aspect of that was that I was very pragmatic naturally of my learning, like I always learn things that I need at that moment. So as I was building websites, which is what I started with, I was a back-end developer, as I was learning data structures and databases and things, I was learning that as I went.
"I think depending on which path you take, you have to be prepared to compensate."
And one really interesting thing that happened, was that at one point in my career, I was sitting with a coworker and we were discussing different search algorithms, and he mentioned the Big O notation. So he was like, “oh, if we take this path it will be like O(n), if we take this path, it will be like O(n²)." And I had never heard of the Big O notation before then, so I was like, “uh, what is this, what is this person talking about?” So I had to...when I came back home that day, I was Googling, “what is Big O notation?” and learning about that.
"You don't need to have a computer science degree, but it can be really helpful and it can be one way to achieve your goals."
I think depending on which path you take, you have to be prepared to compensate, because on the other end of the spectrum, you have a CS degree. And computer science degrees, as far as I know, are very theoretical. You're learning about the fundamentals of computer science, you're not learning the iOS SDK, so then you have to compensate for that, by actually learning the SDK and learning the APIs. And that's something that I didn't really have to compensate for, because that's how I learned. So, in my opinion, at least, you don't need to have a computer science degree, but it can be really helpful and it can be one way to achieve your goals.
There are also many new ways of getting an education as well. For example in Sweden, where I'm from, there is now a very, very popular trend of having these two or three year education programs, where a big part of that is spent at companies, actually working at companies. Hands-on, internships. Those have become really popular, because they provide a little bit quicker, easier way to get out into the workforce and to learn something rather than spending many, many years at university. And that's another trade-off there, that's another thing you can go for. So there are many different paths you can take.
Paul Hudson: A CS degree is just one of the many paths, it sounds like, but at the same time, if you aren't going to take that path, you are at least committing yourself to backfill the parts you would have learned in university. Like for you, the Big O notation. At some point, you think, “I’m going to hit this wall, I will learn it,” which you would have done at university, presumably, depending which of course you took, but you will do at some point yourself later on, on an ad hoc, as needed basis, basically.
John Sundell: And it's kind of funny, because a lot of the things that I tend to write about and that I'm interested in, are very computer science-related. So, I think I would have absolutely loved doing computer science, it's just that's not how things worked out for me in my life.
This transcript was recorded as part of Swiftly Speaking. You can watch the full original episode on YouTube, or subscribe to the audio version on Apple Podcasts.
BUILD THE ULTIMATE PORTFOLIO APP Most Swift tutorials help you solve one specific problem, but in my Ultimate Portfolio App series I show you how to get all the best practices into a single app: architecture, testing, performance, accessibility, localization, project organization, and so much more, all while building a SwiftUI app that works on iOS, macOS and watchOS.
Link copied to your pasteboard.
|
OPCFW_CODE
|
/* Copyright 2003, Carnegie Mellon, All Rights Reserved */
package edu.cmu.minorthird.classify.transform;
import java.util.Iterator;
import edu.cmu.minorthird.classify.BasicDataset;
import edu.cmu.minorthird.classify.Dataset;
import edu.cmu.minorthird.classify.Example;
import edu.cmu.minorthird.classify.Instance;
import edu.cmu.minorthird.classify.multi.MultiDataset;
import edu.cmu.minorthird.classify.multi.MultiExample;
/**
 * Base class for instance transformations.
 *
 * Concrete subclasses only implement {@link #transform(Instance)}; the
 * example- and dataset-level overloads defined here apply that
 * instance-level transformation while carrying labels through unchanged.
 *
 * @author William Cohen
 * Date: Nov 21, 2003
 */
abstract public class AbstractInstanceTransform implements InstanceTransform
{
	/** Transform the example's underlying instance, preserving its label. */
	@Override
	final public Example transform(Example example)
	{
		Instance converted = transform(example.asInstance());
		return new Example(converted, example.getLabel());
	}

	/** Transform the example's underlying instance, preserving its multi-label. */
	final public MultiExample transform(MultiExample example)
	{
		Instance converted = transform(example.asInstance());
		return new MultiExample(converted, example.getMultiLabel());
	}

	/** Build a new dataset containing the transformed version of every example. */
	@Override
	final public Dataset transform(Dataset dataset)
	{
		Dataset result = new BasicDataset();
		Iterator<Example> examples = dataset.iterator();
		while (examples.hasNext()) {
			result.add(transform(examples.next()));
		}
		return result;
	}

	/** Build a new multi-label dataset containing the transformed version of every example. */
	final public MultiDataset transform(MultiDataset dataset)
	{
		MultiDataset result = new MultiDataset();
		Iterator<MultiExample> examples = dataset.multiIterator();
		while (examples.hasNext()) {
			result.addMulti(transform(examples.next()));
		}
		return result;
	}

	/** Instance-level transformation supplied by concrete subclasses. */
	@Override
	abstract public Instance transform(Instance instance);
}
|
STACK_EDU
|
I stood in the middle of the cell, listening as the hissing drew nearer. Widening my stance, I prepared to fight whatever came my way. I may have been captured and held against my will, but I would not die without defending myself. Clanging came from the other end of the corridor, along with a feral sounding growl. The hisser and the growler met near my door with a standoff. To me, the two still unseen creatures were still just hissing and growling, but there seemed to be some kind of animal communication taking place between the two. The growling became louder and more intense when suddenly, the hissing faded slowly into the distance.
The growling slowly stopped, but never got any further from my door. I could hear more clanging and then fabric rustling, as if the growler was straightening itself for presentation.
The brindle cat who had so presumptuously placed herself in my yard last week was standing upright at my cell door wearing a full jacket and leather boots. I assumed she was fishing a set of keys out of her coat pocket by the jangling. The strangest part was that she stood as tall as I did.
I was speechless. Had I been shrunk? Had she grown? How was she going to use the keys once she got them out of her pocket?
She noticed me watching her and said, “Your Majesty, my name is Sasha. I’ve come to rescue you. Are you injured?”
I understood everything she said, but the understanding shocked me so badly that it took me a moment to respond. “No,” I was finally able to respond. “I’m not injured. Who are you? What are you?”
Normal, proper formalities were simply beyond me at this point.
“Madame Petra, if we stay to discuss all of this, that snake’s friends will surely be on their way soon. We must leave now,” said the cat while she opened the cell door.
She grabbed my hand and pulled me out of the cell. To my right, the passageway was clear. Sasha turned to the left and started scrambling over something. As she reached the top of the obstruction, I could finally see what it was. It was the head of a dead snake, the scales of its face still moist and covered with mucus and its body presumably following behind the head. A droplet of venom hung from the snake’s right fang. The dim light provided by the lightning bugs’ backsides showed only that the color of the snake matched that of the earthen walls.
“Sasha?” I called out, alarmed.
“Madame,” she said, “The other direction promises a live snake. This direction definitely has a dead snake and only the threat of more. We need to go now.”
Sasha was quickly disappearing behind the head of the snake, so I pulled up the skirt of my muslin dress and put one foot on the snake’s snout and started climbing. The snake’s scaled flesh was still warm and moved under the pressure of my feet. Deciding to put my initial repulsion behind me, I realized climbing over the snake was really just like climbing over a hill like I had done so frequently as a child. The only difference was that the snake’s head and body were warm and moved slightly different than dirt. I had to get down on my hands and knees as the snake’s body filled most of the space of the passageway. Sasha continued lithely ahead of me, appearing to easily scramble over the carcass of the snake. I had more difficulty, but I’m positive that my difficulties were due to my shock at the whole situation and not to any physical limitations.
I saw Sasha reach the end of the snake’s body and stop to wait for me. It took me a few moments more before I reached the end of the snake. At the end of its body, I noticed the rattle. I counted the rattles and came up with eighteen. We were in a tunnel that had been filled almost by half by a snake that could only have been around ten feet long.
“Sasha, I want my questions answered now. Where are we? What has happened to me?” I needed answers before I continue through the corridor.
|
OPCFW_CODE
|
Apply for different positions in a company
I applied for a position in a company (I am a programmer), let's call this position is Position A.
After I applied Position A, I just realized that there is Position B is opened by that company which has requirement more suits to my skills rather than the one that I applied, Position A.
Some days later, the company went back to me to say that the application that I applied, Position A, had been closed and they had found other candidate that more suits and keep my details for future reference if there is suitable job opportunity arise.
However, a week later, the company sent an email to offer me other position that doesn't suit me at all, Position C. Can I reply to the company to apply for Position B that suits to me much better?
What I consider is they informed me that they have kept my details but they refer me to a job opening, Position C, which doesn't suit my skills at all.
Sure you can. Why can't you?
"Can I reply to the company to apply for the position that I have been mentioned above that more suits to me?" Voting to close on the grounds that the post is poorly written. I am not a mind reader.
JaneS: Please see my edited post, I have consideration in last sentence.
VietnhiPhuvan: lol sorry, I have put more precise detail.
@RON My comment doesn't change. You didn't get position A, you can apply for B if you like. It doesn't matter if they offered you to apply for C in that context. You could possibly (but not necessarily) read that by contacting you that they are interested in you. However that could easily be an HR thing and mean very little.
The simple answer is that you are more than welcome to ask for the other position, they just do not have to accept your request. You will likely have to go back through the selection process, unless the managers for both positions are the same.
In the meantime, you would almost certainly have to give up the position that you were offered. Whether this is an issue is a matter of personal taste - if you really want to work for that specific company it might be worth working a less than ideal job (with the goal of applying for a transfer later), but of course there is the risk you lose other valuable skills in the interim and can never make the transfer.
One thing to keep in mind is that they thought you matched Position C best. At a minimum you should write a very good cover letter when asking about the position you want explaining why you are such a good fit for it.
Please see my edited post. You are mostly correct with your interpretation of what I ask. Btw, I haven't gone through any interview process yet.
I updated my reply to take the new information into account :)
|
STACK_EXCHANGE
|
Every year the Schwippy Christmass Tree is turned on and streamed to the world, allowing anyone who wants to the ability to turn the Christmas lights on or off. During the Christmas season, check it out at Tree.Schwippy.com
The technology used to control the tree is actually quite lo-tech. I have been using nothing but X10 technology. While a good relay/triac controller hooked up to the computer would be nice, you just can't beat the simplicity of X10.
Each different strand of Christmas lights has its own X10 lamp module and address, in this case G5, G6, and G7. There are 3 strands of lights per color. The lights are wired in series - one plugged into the end of the other - so all you see here is the head strand being plugged into each X10 module. X10 lamp modules were chosen over appliance modules so the lights would fade in and out instead of instantly turn on or off. It makes for a much more pleasant scene. All the X10 modules are simply plugged into a power strip behind the tree out of sight.
The main controlling aspect of the entire system is the X10 computer interface module (Model CM11A). It is hooked up to a server running Ubuntu that is turned on all the time.
Since X10 is a power-line radio protocol, I don't have to have the equipment running in the same place, as long as they are on the same power grid (within a certain length, of course). The computer is in one room, while the tree is in a completely separate room.
The computer sends a simple serial string to the CM11A module, which then translates the command into an X10 command and injects it onto the power grid for any receivers to capture. Unlike other X10 transmitters, the CM11A doesn't have to be locked down to one house code (the letter part of the code). Any house code and any unit code command can be sent from the computer.
The main html file that hosts the tree's page (Located at tree.schwippy.com) is hosted on an off-site server. If you are interested in the client-side scripting, just view the source of that file. On that page are the buttons that control the lights. Instead of the offsite server controlling the lights, it instead contacts a computer I have set up at home. The PHP file that is called on that computer is located below.
As you can see, when it detects that a user is trying to issue a command to turn a light on or off, the php file simply executes a command on the shell of the computer. In this case, "heyu". Heyu is a great program written specifically for controlling X10 related hardware. In its simplest form, which is what is being used here, Heyu is executed as heyu <command> <address>. Issuing the command "heyu on G2" will turn on any appliance that is hooked up to an X10 module with the code set to G-2.
Heyu needs to be compiled manually, so if you're trying to get it installed, make sure you know how to do this beforehand. Once you have it installed, you will be able to use this for pretty much any home-automation project you can think of when it comes to scripting your house.
Also a thing to note, if you are using this script above, you will need to give the web user the privilege to execute the heyu command on the system. Make sure you have your system set up securely before opening any execution privileges.
Why it was done
The Schwippy Christmas Tree was inspired by Alek's controllable Christmas light setup. Definitely check out his website at www.Komar.org. As well as doing controllable Christmas lights, he also does controllable Halloween decorations. All his proceeds go towards donations for researching Celiac Disease, which both his kids have.
My project was not done to try and 'one-up' Alek's, but rather just because it seemed like a fun project to do. It didn't seem to require a lot of effort (which it didn't), it was something that had high WAF (Wife Acceptance Factor), and just in general seemed like something people would enjoy (which they do).
This shows how much money I spent on the tree the first year setting it up. Each subsequent year it obviously only costs me the price of a new tree.
|Tree||We all know they vary in price||89.00|
|X10 Lamp Modules||One for each color (4 @ $12 ea.)||48.00|
|X10 CM11A Module||Try looking on Amazon||15.00|
|Christmas Lights||3 strands per color @ $4 each strand||48.00|
|
OPCFW_CODE
|
Hi again hoghoi1
I'll jump in.
How to use Multi-Language Manager
(just added this heading so other folks will also find this post via forum search)
Multi-Language Manager uses a Gadget to show language selection and manage page/menu associations.
Once you have Multi-Language Manager installed, its Gadget called "Language Select" will show automatically if your theme has a "GetAllGadgets" Area. If not, the Gadget won't show automatically.
Let's use the Bootswatch Flatly Theme:
With this theme we will have to manually add the "Language Select" Gadget to the layout.
But first I'd recommend to activate the "Off-Canvas" layout variant of the theme in order to have a sidebar:
- Click Admin Panel ⇒ Appearance ⇒ Manage Layouts , then "Available Themes"
- On the "Bootswatch Flatly 1.0" Preview Panel click "5 Off Canvas" whicht gets you into Theme/Layout Preview Mode.
At the top left corner of the window click the green box Use this theme in a new layout
In the pop-up box check "Make default" and click Save .
You have now made "Bootswatch Flatly/5_Off-C" your default theme/layout.
Now let's edit the layout and include the "Language Select" Gadget:
- Click Admin Panel ⇒ Appearance ⇒ Edit this Layout which will start Layout Edit Mode.
- At the right side of the page you'll see a (likely empty) light grey Sidebar area containing a darker grey field wit a dotted outline.
Hovering you mouse pointer there will reveal a Insert button. Click it.
- In the appearing popup box switch to Gadgets and select click Add in the Language Select row.
- The Multi-Language Manager Gadget is now placed in the layout and can be used.
- To leave Layout Edit Mode click « Layouts at the top left or alternatively any menu Item in the site header.
The concept of Multi-Language Manager is pretty straightforward: it lets you associate pages of different languages and automatically cross-links them in all menus. So, the workflow is:
1) Create a additional language page (or make a copy of the main language page and translate the content)
2) Associate them using the "Language Select" Gadget.
Let's simply go the copy-page route:
- Navigate to your Homepage.
- Click Admin Panel ⇒ Current Page ⇒ Copy (You may also copy pages in the Page Manager)
- You will be prompted a input for a new name. I'll name the new page "Startseite" (german for "Homepage").
After clicking the Continue button, a yellow message will appear saying that you will be redirected to the new page in 15 sec.
Let's skip this for now by simply clcking the "Home" link in the Website's main menu. We can translate the new "Startseite" later.
- As you may have noticed, when you're logged in, the "Language Select" Gadget will show a "Admin: Add Translation" link. Click it now - the "Page Settings" box will pop up. Assuming your website's main language is English, the entry in the 1st row will be Language: English | Title: Home.
The second row provides two select boxes which will use gpEasy's Autocomplete functionality you may already be familiar with…
- Place the caret (text cursor) in the left select box in the "Language" column and type "de" (without quotes). The Autocomplete list will show a language selection (in fact it shows all available languages that contain "de" in their name or abbreviation).
The second list item will be "Deutsch/de" ⇒ Click it. Do not simply leave "de" in the input field!
- Now click the select box on the right in the "Title" column. Autocomplete will list available pages here.
Typing "st" will show "Startseite/Startseite" in the Autocomplete list. Again, clickt it. Then hit Save .
- The "Page Settings" box stays open offering to associate even more language pages with the Home page. We don't need to, so close the box and click the "Refresh this page" in the yellow message bar at the top. Or simply hit F5, Ctrl+R ot whatever your browser uses to reload a page.
- Your "Home" page is now associated with "Startseite" and gpEasy knows which languages the pages are in.
This was the main part of the Multi-Language Manager workflow. After refreshing the page you will see …
Admin: Add Translation
… in the "Language Select" Gadget in the sidebar.
Normal visitor will only see…
Clicking the "Deutsch" link will get you to the german "Home" page.
You will notice that "Home" in the website's main navigation will turn into "Startseite" now.
You now could edit this page and start translating it to german. Or you could repeat the above steps and make your whole website dual-language first and start translation afterwards. The workflow is non-linear. You decide.
There are some more things to be said about using Multi-Language Manager but this should be a good starting point.
|
OPCFW_CODE
|
The Unix OS, first released in 1969 and titled “UNICS”, was widely adopted by academic institutions due to its portability. Unlike Windows and Mac, Unix is not written in assembly. Rather, Ken Thompson along with Dennis Richie wrote the Unix kernel in C. That’s where Unix’s portability comes into play; since the OS isn’t tied to a particular machine, it is an open system that can run on various hardware. Though, it’s portability does extend beyond simply having its kernel written in a higher language than Assembly; the design of the OS itself strips it bare of the bloat that is inherent in other commercial operating systems.
While there are many who love Unix systems for their minimal overhead, there are others who don’t. The Unix-Haters Handbook, a compilation of messages sent to the Unix-Haters mailing list, reveals the gripes that programmers have with Unix systems. The contributors provide hilarious commentary that even the staunchest Unix advocate may find understandable.
Unix systems have a thread bare user interface:
Unix was a programmer’s delight. Simple, elegant underpinnings. The user
interface was indeed horrible, but in those days, nobody cared about such
things. As far as I know, I was the very first person to complain about it in
writing (that infamous Unix article): my article got swiped from my computer, broadcast over UUCP-Net, and I got over 30 single-spaced pages of taunts and jibes in reply. I even got dragged to Bell Labs to stand up in
front of an overfilled auditorium to defend myself. I survived. Worse, Unix survived.
Ironically, the very attributes and design goals that made Unix a success
when computers were much smaller, and were expected to do far less, now
impede its utility and usability. Each graft of a new subsystem onto the
underlying core has resulted in either rejection or graft vs. host disease with
its concomitant proliferation of incapacitating scar tissue.
The Unix networking model is a cacophonous Babel of Unreliability that quadrupled the size of Unix’s famed compact kernel. Its window system inherited the cryptic unfriendliness of its character-based interface, while at the same
time realized new ways to bring fast computers to a crawl. Its new system
administration tools take more time to use than they save. Its mailer makes
the U.S. Postal Service look positively stellar.
Contributor compares Unix to a virus
Unix possesses all the hallmarks of a highly successful virus. In its original
incarnation, it was very small and had few features. Minimality of design
was paramount. Because it lacked features that would make it a real operating system (such as memory mapped files, high-speed input/output, a robust file system, record, file, and device locking, rational interprocess
communication, et cetera, ad nauseam), it was portable.
A more functional operating system would have been less portable. Unix feeds off the energy
of its host; without a system administrator baby-sitting Unix, it regularly
panics, dumps core, and halts. Unix frequently mutates: kludges and fixes
to make one version behave won’t work on another version. If Andromeda
Strain had been software, it would have been Unix.
Unix is a computer virus with a user interface.
Contributor’s explanation for the proliferation of Unix:
Like any good drug dealer, AT&T gave away free samples of Unix to university types during the 1970s. Researchers and students got a better high from Unix than any other OS. It was cheap, it was malleable, it ran on relatively inexpensive hardware. And it was superior, for their needs, to anything else they could obtain.
Better operating systems that would soon be competing with Unix either required hardware that universities couldn’t afford, weren’t “free,” or weren’t yet out of the labs that were busily synthesizing them. AT&T’s policy produced, at no cost, scads of freshly
minted Unix hackers that were psychologically, if not chemically, dependent on Unix.
The reason why “Unix” is not used to name new versions of Unix, besides the fact that AT&T owns the copyright:
These days, however, most vendors wouldn’t use the U-word if they had a
choice. It isn’t that they’re trying to avoid a lawsuit: what they are really
trying to do is draw a distinction between their new and improved Unix and
all of the other versions of Unix that merely satisfy the industry standards.
It’s hard to resist being tough on the vendors. After all, in one breath they
say that they want to offer users and developers a common Unix environment.
In the next breath, they say that they want to make their own trademarked version of Unix just a little bit better than their competitors: add a few more features, improve functionality, and provide better administrative
tools, and you can jack up the price. Anybody who thinks that the truth lies
somewhere in between is having the wool pulled over their eyes.
File deletion in Unix systems:
I too have had a similar disaster using rm. Once I was removing a file
system from my disk which was something like /usr/foo/bin. I was in /
usr/foo and had removed several parts of the system by:
% rm -r ./etc
% rm -r ./adm
…and so on. But when it came time to do ./bin, I missed the period.
System didn’t like that too much.
Unix wasn’t designed to live after the mortal blow of losing its /bin directory. An intelligent operating system would have given the user a chance to recover (or at least confirm whether he really wanted to render the operating system inoperable).
Please, please, please do not encourage people to overload standard
commands with “safe” commands.
(1) People usually put it into their .cshrc in the wrong place, so that
scripts that want to “rm” a file mysteriously ask for confirmation,
and/or fill up the disk thinking they had really removed the file.
(2) There’s no way to protect from all things that can accidentally
remove files, and if you protect one common one, users can and will
get the assumption that “anything is undoable” (definitely not true!).
(3) If a user asks a sysadm (my current hat that I’m wearing) to assist
them at their terminal, commands don’t operate normally, which is
frustrating as h*ll when you’ve got this user to help and four other
tasks in your “urgent: needs attention NOW” queue.
If you want an “rm” that asks you for confirmation, do an:
% alias del rm -i
AND DON’T USE RM! Sheesh. How tough can that be, people!?!
We now return you to your regularly scheduled “I’ve been hacking so
long we had only zeros, not ones and zeros” discussion…
Just another system hacker
Bad Error Message Humor:
People have published some of Unix’s more ludicrous errors messages as
jokes. The following Unix puns were distributed on the Usenet, without an
attributed author. They work with the C shell.
% rm meese-ethics
rm: meese-ethics nonexistent
% ar m God
ar: God does not exist
% “How would you rate Dan Quayle’s incompetence?
% ^How did the sex change^ operation go?
% If I had a ( for every $ the Congress spent, what would I have?
Too many (‘s.
% make love
Make: Don’t know how to make love. Stop.
% sleep with me
% got a light?
% man: why did you get a divorce?
man:: Too many arguments.
% ^What is saccharine?
%blow: No such job.
Dennis Ritchie fires back:
To the contributors to this book: I have succumbed to the temptation you offered in your preface: I do
write you off as envious malcontents and romantic keepers of memories. The systems you remember so fondly (TOPS-20, ITS, Multics,
Lisp Machine, Cedar/Mesa, the Dorado) are not just out to pasture,
they are fertilizing it from below.
Your judgments are not keen, they are intoxicated by metaphor. In
the Preface you suffer first from heat, lice, and malnourishment, then
become prisoners in a Gulag. In Chapter 1 you are in turn infected by
a virus, racked by drug addiction, and addled by puffiness of the
Here is my metaphor: your book is a pudding stuffed with apposite
observations, many well-conceived. Like excrement, it contains
enough undigested nuggets of nutrition to sustain life for some. But
it is not a tasty pie: it reeks too much of contempt and of envy.
|
OPCFW_CODE
|
🌺 Guru Purnima, a festival celebrated worldwide, is a special occasion to express gratitude towards our Gurus. A Guru plays a vital role in guiding us towards success in both life and career. Let's explore the significance of Gurus and how platforms like Google, YouTube, LinkedIn, Stack Overflow, ChatGPT, Hashnode, and AI tools act as virtual Gurus, delivering exceptional content and guidance. 📚💡
"The impact of a Guru goes far beyond words; it resides in the profound gratitude that fills our souls." 🌟💖
💡 The Significance of the Guru 🙏:
🌟 Dispenser of Knowledge and Wisdom: Gurus possess profound knowledge and wisdom, generously sharing it with their disciples, nurturing their growth.
🌟 Mentor and Guide: Gurus provide invaluable mentorship, offering guidance, support, and motivation to overcome challenges and reach new heights.
🌟 Inspiration and Role Model: Gurus inspire us to achieve our full potential, serving as role models through their achievements and teachings.
💡 The Role of the Guru in Career Development 💼:
🌟 Expertise and Knowledge Sharing: Gurus share their expert knowledge, helping individuals gain expertise and understanding in their chosen fields.
🌟 Guidance and Mentorship: Gurus provide career guidance, assisting in decision-making, identifying strengths, and offering support throughout the professional journey.
🌟 Networking and Connections: Gurus often have extensive networks, introducing mentees to industry professionals, opening doors to new opportunities.
🌟 Emotional and Psychological Support: Gurus provide not only technical knowledge but also emotional and psychological support, fostering resilience and a positive mindset.
💡 Virtual Gurus: Platforms for Knowledge and Skill Enhancement 🌐:
🌟 Google: The ultimate search engine, Google, acts as a reliable source of information, offering answers to queries and valuable insights.
🌟 YouTube: YouTube's extensive video library hosts tutorials, lectures, and skill development resources, allowing users to learn from experts across various domains.
🌟 LinkedIn: As a professional networking platform, LinkedIn connects individuals with industry experts, thought leaders, and mentors, facilitating knowledge sharing and career development.
🌟 Stack Overflow: Developers rely on Stack Overflow, a community-driven platform, for technical guidance, problem-solving, and knowledge exchange.
🌟 ChatGPT: AI-powered chatbots like ChatGPT provide instant responses, personalized guidance, and support in various domains, serving as virtual Gurus.
🌟 Hashnode: Hashnode is a platform for developers, fostering knowledge sharing, discussions, and learning from industry experts, acting as a virtual Guru for continuous growth.
🌻 Guru Purnima reminds us of the invaluable role Gurus play in our lives and careers. They provide guidance, knowledge, and inspiration. In today's digital world, platforms like Google, YouTube, LinkedIn, Stack Overflow, ChatGPT, and Hashnode act as virtual Gurus, delivering remarkable content and connecting us with experts. Let's express gratitude to these Gurus, both physical and virtual, and embrace the power of learning and mentorship in our journey to success. 🌈🚀
🔍 Checkout GitHub Repository for projects:
|
OPCFW_CODE
|
M: Ask HN: Where to sell your profitable company - to_the_top
I am a student living in Canada, over the last 2 years I have built out a company in a niche market where at the time there were a couple of competitors (now around 5-6). 3 of them are VC funded.<p>I am doing this part time still and trying to earn my business degree but now I am finishing up and want to sell this business so I can try something new, the business generates around 2 million a year (CAD) in profits, so I am wondering what sort of 'investment banks' or people I should get in touch to sell this company.<p>I love working on this and would not mind working on it a more, the sad part is, being in Canada and not an active guy in the 'VC/Tech' scene so I don't know which route to take or who to approach.
R: exline
I've got to ask why sell? If it generates 2 million in profit a year, it
should be generating plenty of cash to bank roll your next endeavor.
Since you are the only one maintaining you can simply hire 2-3 people to do
the work you are currently working and keep the rest of the profit. Most
people here would kill to have a business that is generating 2 million in
profit.
R: lionhearted
Totally disagree. As soon as you're strongly think of selling, it means it was
time to sell yesterday. If you can get anything fair, do it. Once your heart's
not in it, it's good to move on and mentally cut ties. You won't be able to
keep innovating if you're not mentally with it, and nowadays businesses can
fall apart rapidly even if their founder is totally onboard, let alone if he's
not.
People who have never built a successful business won't understand. As soon as
you strongly feel it's time to sell, do it. Get out and build your next thing.
Later you might regret it, that's natural, but better than having things decay
on their own if you're not fully with it. My advice - yes, sell.
R: run4yourlives
> the business generates around 2 million a year (CAD) in profits,
Are you serious? In Canada? Why in god's name would you sell this? Hire
someone to do the dirty work for 50K and live off the rest until you grow old
and gray.
Why would you even think otherwise?
R: to_the_top
Forgot to mention, I built it up myself and still maintain it myself only and
that I am looking for a exit because I want to pursue something even bigger
with the funds I get from selling this.
R: amirmc
You might want to edit your original post to include this info.
R: enjo
Go straight to your well funded competitors. They'll love the chance to talk
acquisition more than likely.
R: josefresco
No so sure informing your competitors that you want out is such a great idea.
Depends highly on your market or product/service but I'd be wary of anyone
approaching me with the same offer.
R: shafqat
Can you share what it is? I can atleast intro you to some of my investors if
the business/product is compelling and you would at all be interested in
taking some investment and continue working on it? Or you can email me (see my
profile).
R: lizzy
There are private equity groups that acquire companies with profits of this
size. But I don't know the industry well enough to suggest names. I would try
and get some names from any investment bankers you know -- maybe alums from
your biz school if you don't know anyone personally.
R: johng
Can't help you with finding a VC, but I can tell you that others will be
interested in the following:
How will this business fare with you gone completely?
Is it something anyone can be trained to do?
If so, what barriers to entry are there to keep someone else from duplicating
what you've done without buying you out?
R: to_the_top
The business is 80% automated, the rest is just maint. work such as: sys
admin, emails, approving certain things etc. so yes someone can be trained
very easily. Barriers to entry... none really in the sense we do not
necessarily have much proprietary 'technology' except we have the users/the
revenue/infrastructure in place.
R: alexsherrick
Hey I'll help with the maint if you are looking for somebody ;)
R: amirmc
I suspect the most likely buyers would be: (a) A competitor in the same space
looking for market share or (b) A firm in an adjacent market looking to
diversify
Those are only my initial thoughts and there are probably other ways of
looking at it.
I'm guessing that one of the the three VC funded competitors could be a likely
acquirer. Given that there are three of them, you might even be able to create
a bidding war (assuming you have something worth bidding for, e.g tech, market
insight/share).
I'd do some research on each of those companies to make sure you have
something of value to them and then try to meet with the founders/CEOs
informally, if possible.
R: SriniK
Talk to a bank who can represent you. Typically they come up with a
pitchbook/deck and try to pitch to prospective companies. They charge anywhere
between 5-7% depending on your business. Best way to get to a banker who can
sponsor is to talk to a lawyer who is in the startup world.
If you want to send direct messages, do the pitching and negotiations
yourself, try linkedin paid service.
R: to_the_top
I have talked to td waterhouse private investment people and were going to be
setting up a deal. Going to look for startup lawyers on linkedin in my area.
R: paulhart
You say you're based in Toronto. Have a look at the people behind
<http://www.startupnorth.ca/> and get in touch with them, I'm sure they'd
either be interested themselves, or would be happy to make some introductions.
R: apalmblad
Where in Canada are you based? Your options may vary if you're in a big market
(TO, Vancouver, Montreal...) or not.
That said, I'd imagine your best route is to find local
investor/angel/business-mentor types who can recommend a firm that specializes
in getting companies acquired. I'm based in Vancouver, and my business partner
has definitely had getting-to-know-you meetings with a law firm - recommended
by an investor - that handles that sort of thing.
R: to_the_top
Only Tdot :)
R: dmillar
Have you been approached by any VCs in the past? I would revisit that if
possible.
In my experience, there is no shortage in VC contact for any, even modestly,
profitable business online. So, I am sure you've heard from at least a small
handfull.
R: amorphid
Talk to Benjamin Yosko => <http://www.instigatorblog.com>. He is very
approachable, lives/works in Montreal, and recently sold his startup. Tell him
Captain Recruiter says hello.
R: jeffepp
Ping me, I know a few people who may be interested...
|
HACKER_NEWS
|
The reason why people usually set up a VPN server is to maximize access to network sitting anywhere. Apart from providing safety and security by making sure to take care of the privacy the following benefits usually urge people to set up a VPN server.
- You prefer the benefits of the VPN service without the monthly payments.
- Avoiding the logging of your online activities by a VPN service.
- Having access to the home network sitting anywhere in the world.
- Access to local resources like your home Netflix account when in a different geographic location.
- Ability to give other people access to your home network.
There are 3 ways to set up a VPN server of your own.
The way of the setup completely depends on two things –
- What do you plan to do with the VPN
- The hardware that you will be using
The setup can happen using any of the 3 things you have –
- CLOUD SERVICE
1. Setup using Router
The VPN server setup is possible through a router if the router has built-in VPN capabilities. It is also possible if the router supports custom firmware like DD-WRT or OpenWrt.
2. Setup using your Computer
If installing via router isn’t possible, the second option to do so is through your own computer. All you would be needing is a spare computer. Setting up on a computer becomes easy if you are using a computer running Microsoft windows that will be having built-in VPN capabilities.
But if you use this option you need to keep in mind that you would be needing a computer that hosts your VPN server running as and when you wish to use VPN.
3. Setup on a Cloud Service
If you use this route you won't be needing any hardware. But while using this you will have to keep in mind that the cloud service will be able to see whatever you do with the VPN.
1.SETUP USING ROUTER
Some home or business routers might have a VPN server built into them. If that’s the case all you would be required to do is
- Log into the router
- Turn it on
- Configure the VPN server
If the router has such capability, a manual that comes along with it should guide you through the process.
There is also a possibility that your router doesn’t have an in-built VPN server but supports one. This typically means that you would have to install third-party firmware on the router. Once the right firmware is installed, the router will be able to function.
Three main and most popular ones are –
This is a Linux-based firmware for wireless routers and access points.
This custom firmware is for consumer-grade computer networking routers and gateways which are powered by Broadcom Chipsets.
It is an open-source project used for embedded operating systems that are based on Linux that is primarily used on embedded devices to route the network traffic.
2.SETUP USING COMPUTER
If you find the router supporting third-party firmware risky, you can always install a VPN server on one of your computers. Doing this will allow access to the files that host the VPN Server.
The simplest way is to use a Windows or Mac desktop that you can leave running 24 hours a day. The reason being you don’t want the computer to turn off when you need it.
3.SETUP ON A CLOUD SERVICE
The best part about a cloud service is that it doesn’t need any hardware. You won’t be needing a router or hacking your router through third-party firmware nor would you be needing a computer to play the role. This way it is pretty convenient.
But along with being convenient, it comes with its own set of drawbacks.
Firstly, while using cloud service, to set up a VPN server you will have to install software on a virtual machine running on cloud services servers. This will result in making your VPN server slow as it will be running on distant hardware.
Secondly, this also means that you will have to trust the cloud service blindly. As everything you do will happen on their hardware with the help of their virtual machines. Sadly, this leaves you with the same issue as using a commercial VPN service – that you need to trust a third-party.
|
OPCFW_CODE
|
Introducing Plus Codes in Place Autocomplete, Place Details and Geocoding to help you serve users everywhere
Software Engineer, Plus Codes
Miguel Ángel Vilela
Technical Solutions Engineer, Google Maps Platform
Try Google Maps Platform
Unlock access to real world data and insights with a monthly $200 Google Maps Platform credit. Get started
Although we’re constantly mapping the world–adding addresses, points of interest, roads, and more–there are many places without traditional addressing systems and some areas with no addresses or road names at all. This can be a challenge for businesses such as food delivery or on-demand transportation, that rely on precise locations to deliver services in these regions. And being able to receive goods and services at your specific location is particularly important right now.
But first, what’s a Plus Code?
A Plus Code is a simple alphanumeric code which can be combined with a locality (for example: CWC8+R9 Mountain View), derived from latitude and longitude coordinates. Anyone can find the Plus Code for any location in the world by dropping a pin in Google Maps and they’re searchable on Google Search and Google Maps. Once someone knows the Plus Codes for their frequented locations, they’re able to use them just like a traditional address.
Plus codes in Place Autocomplete
Now that we’ve integrated Plus Codes into Place Autocomplete, when your users want to call a ride, have lunch delivered, or use any other location-based service you’ve developed, your application will automatically start returning Plus Code suggestions once the first letter of the town or locality in a Plus Code address is typed.
Plus Codes in Geocoding and Place Details
The process doesn’t end there. Once your user has selected the autocompleted Plus Code, you can use either Place Details or Geocoding to convert the Plus Code’s Place ID into geographic coordinates. Alternatively, this same Place ID can be used directly in the Directions API and Distance Matrix API to quickly dispatch a driver, schedule delivery, and more.
Here's a sample code snippet of how we implement the above scenario, combining programmatic Place Autocomplete with a Geocoding API request to get the geographic coordinates of an autocomplete-generated Plus Code entered by the user. You can see the same in full context and find other implementation options on GitHub.
Geocoding now also accepts Plus Codes as the
address request parameter and will return the same Plus Code as a fully populated Geocoding result. For example, a Geocoding request with
address=GCG2%2B3M%20Kolkata will return a result with the Plus Code formatted as both a global code and as a compound code, along with
With Plus Codes in Place Autocomplete, Place Details, Directions and Geocoding, you’re able to pick up or drop off your users and deliver food to their table no matter where in the world they are–helping businesses serve users where they currently operate and expand into new regions worldwide.
For more information on Google Maps Platform, visit our website.
|
OPCFW_CODE
|
Understanding Stratified sampling in numpy
I am currently completing an exercise book on machine learning to get my feet wet, so to speak, in the discipline. Right now I am working on a real estate data set: each instance is a district of California and has several attributes, including the district's median income, which has been scaled and capped at 15. The median income histogram reveals that most median income values are clustered around 2 to 5, but some values go far beyond 6. The author wants to use stratified sampling, basing the strata on the median income value. He offers the next piece of code to create an income category attribute.
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
He explains that he divides the median_income by 1.5 to limit the number of categories and that he then keeps only those categories lower than 5 and merges all other categories into category 5.
What I don't understand is
Why is it mathematically sound to divide the median_income of each instance to create the strata? What exactly does the result of this division mean? Are there other ways to calculate/limit the number of strata?
How does the division restrict the number of categories and why did he choose 1.5 as the divisor instead of a different value? How did he know which value to pick?
Why does he only want 5 categories and how did he know beforehand that there would be at least 5 categories?
Any help understanding these decisions would be greatly appreciated.
I'm also not sure if this is the StackOverFlow category I should post this question in, so if I made a mistake by doing so please let me know what might be the appropriate forum.
Thank you!
Not a programming question, hence arguably off-topic here; better suited for Cross Validated.
You may be the right person to analyze more on this based on your data set. But I can help you understanding stratified sampling, so that you will have an idea.
STRATIFIED SAMPLING: suppose you have a data set with consumers who eat different fruits. One feature is 'fruit type' and this feature has 10 different categories (apple, orange, grapes, etc.). Now if you just sample the data from the data set, there is a possibility that the sample data might not cover all the categories, which is very bad when training the data. To avoid such a scenario, we have a method called stratified sampling, in which the probability of sampling each different category is the same so that we will not miss any useful data.
Please let me know if you still have any questions, I would be very happy to help you.
|
STACK_EXCHANGE
|
Query to show all blog posts while linking in categories
So I have a query that should get all posts from a table while linking the posts categories and the user that created the post. What I am getting though is just a single post returned, not all posts. Below is the schema:
Posts
=====
id
Categories
==========
id
Post categories
===============
postID
categoryID
And here is the SQL code I have so far, kinda deep but it gets all categories concatenated into a single field.
SELECT
blgpostcategories.*,
blgcategories.id,
GROUP_CONCAT(blgcategories.name) AS categories,
blgposts.*,
users.firstName,
users.id AS usersId,
users.lastName,
users.email
FROM blgposts
RIGHT OUTER JOIN blgpostcategories
ON blgposts.id = blgpostcategories.postID
RIGHT OUTER JOIN blgcategories
ON blgpostcategories.categoryID = blgcategories.id
INNER JOIN users
ON blgposts.userID = users.id
UPDATED Query from JNK - Still only returning a single row :-(
SELECT
blgpostcategories.*,
blgcategories.id,
GROUP_CONCAT(blgcategories.name) AS categories,
blgposts.*
FROM blgposts
LEFT OUTER JOIN blgpostcategories
ON blgposts.id = blgpostcategories.postID
LEFT OUTER JOIN blgcategories
ON blgpostcategories.categoryID = blgcategories.id
Not sure, I thought this was the way to approach it. Should I try inner join? I am absolutely positive users is populated.
I added an answer. You want a LEFT JOIN I think.
In an answer by Adam Robinson to a similar question
Because you're using an aggregate in
your query (GROUP_CONCAT), your query
is being grouped. Since you have no
group by clause, your group is the
entire result set (hence seeing every
tag the author has used). Because
MySQL allows for using non-grouped
columns in grouped statements, you
aren't getting an error, but you
aren't getting the query that you
want.
In order to retrieve the proper
results, you need to group your query
on thread.id.
In your case just adding GROUP BY blgcategories.id should do it
This did it! Main difference is I did GROUP BY blgposts.id
Two things I see right off the bat:
1 - Do a LEFT OUTER JOIN not a RIGHT. RIGHT means "show me all the stuff in the right table, whether or not I have anything matching in the left table." You want everything from blogposts so do a left.
2 - Your INNER JOIN may be an issue as well. Are you sure users is populated fully?
EDIT:
The issue is you are using an aggregate function without a GROUP BY! Take out the GROUP_CONCAT() and it should work fine.
See my above edited query. Still feel very n00bish to this query stuff :-(
@Nathan - if you do a SELECT * FROM Blogposts how many rows do you get?
@JNK 2 rows are returned...I am thinking it has something to do with the whole joining of my categories etc. hmm...
@nathan - see my edit. You get funny results when you use aggregates without a GROUP BY. Most databases won't let you do this but MySQL does...
@JNK How would I go about getting the multiple categories that each post can reside in then? I am using the GROUP_CONCAT() to get all categories in a single row without repeating my post data.
@Nathan Stanford II -- Add a GROUP BY clause See this Post
@Nathan - add a GROUP BY, probably on your Primary Key for the Blogposts table.
|
STACK_EXCHANGE
|
Enforcing a business rule operating on aggregate root collections
I have 2 Aggregate Roots (ARs) in my domain:
Product - I guess the name says it all - it's the thing the partner sells. It belongs to:
Partner - the seller.
It's a multitenant system and one of its assumptions is that a Partner can have hundreds of thousands of products depending on a limit - if company A wants to sell only 100 products, they buy the cheapest, Bronze Pass and if they want to sell 100 000 products, they buy the Gold Pass. During the product import process, those rules should be enforced, so at any given moment no partner should be able to keep more products in our system than the limit rule allows.
The Product AR is a big one - it contains ~10 business rules and ~12 ValueObjects. The Partner AR is a small one - it contains only the Id and current limit value.
How the import process looks from the code perspective:
I'm loading the Partner from the database and I assign it with a collection of ids of currently active products. Then, when the product data comes in, I first run it through a method: Partner.LimitProducts(Products) that returns a list of products allowed to be put in the database.
I didn't make products as a strict part of the partner aggregate (so an entity belonging to it) for two reasons:
Performance - I'm using a NoSql - document database and it wouldn't handle keeping huge documents well.
Common sense - it's no accident that the product AR is so big, as 99.999% of the action happening in the microservices being discussed is only about a product and it never even touches the Partner - apart from creating new products, that is.
However I see some problems with this approach. To me, DDD is all about making strong guarantees in the aggregates (the invariants), however when I use one AR as a kind of a filtering layer to the other one, in this one case, it immediately forces me to put the coordination logic a layer above the two ARs involved and this just looks to me that half of the invariant is in a wrong place (which would be the application layer), but where should it be? I considered creating a domain service to place the coordination logic inside or even move the limit business rule there, but I don't know if I like that...
Do any of you see an alternative design for this?
If the partner aggregate only has the limit, that suggests to me that you either:
give the partner aggregate responsibility for the coordination (e.g. require that product creation (and deletion, for whatever deletion means in your domain) go through the partner aggregate)
get rid of the partner aggregate because it's serving no purpose in your domain
I would probably have the partner aggregate keep (ideally in a set, if your language/DB can efficiently maintain such) track of which products (only the ID is needed) are associated with the partner. If that proves to be too big to persist reliably, I would then shard the product set and effectively make each shard its own aggregate (you could get really clever and adaptively shard the aggregate).
That your description says "no partner should be able to keep more products" also strongly suggests that product creation/loading goes through the partner aggregate.
I thought about this approach and there's one reason I don't like it: the product aggregate list will only go through the partner aggregate if new products are added, so that the number of products exceeding the partner limit will not be passed through. However, in every other case (and as I said, there's 10+ product actions and there will be more) partner aggregate stays put, as the actions are performed only on products.
Effectively making the partner manage their products in this situation will only lead to redundant partner aggregate methods.
My current idea is to make the partner just an entity - kind of a configuration data bag, as that's what it mostly is - and always push new products through a domain service enforcing that limit rule. Seems procedural and something feels wrong, but I think it's the only way to make it both - storage optimal and readable.
|
STACK_EXCHANGE
|
On BigSur, Windows10-VM freezes at Start Boot Option
Describe the issue
After installing Windows10-ISO i start the VM. But it does not booting. VM freezes at Start Boot Option.
Configuration
UTM Version: Version 2.0.27 (27)
OS Version: BigSur 11.3.1
Intel or Apple Silicon? Intel i3 Quad-Core
Upload VM
config.plist
BuildMachineOSBuild
19H524
CFBundleDevelopmentRegion
en
CFBundleDocumentTypes
CFBundleTypeName
UTM virtual machine
CFBundleTypeRole
Editor
LSHandlerRank
Owner
LSItemContentTypes
com.utmapp.utm
LSTypeIsPackageLSTypeIsPackage
CFBundleExecutable
UTM
CFBundleIconFile
AppIcon-macOS
CFBundleIconName
AppIcon-macOS
CFBundleIdentifier
com.utmapp.UTM
CFBundleInfoDictionaryVersion
6.0
CFBundleName
UTM
CFBundlePackageType
APPL
CFBundleShortVersionString
2.0.27
CFBundleSupportedPlatforms
MacOSX
CFBundleVersion
27
DTCompiler
com.apple.compilers.llvm.clang.1_0
DTPlatformBuild
12B45b
DTPlatformName
macosx
DTPlatformVersion
11.0
DTSDKBuild
20A2408
DTSDKName
macosx11.0
DTXcode
1220
DTXcodeBuild
12B45b
ITSAppUsesNonExemptEncryption
LSApplicationCategoryType
public.app-category.business
LSMinimumSystemVersion
11.0
NSAccentColorName
AccentColor
UTExportedTypeDeclarations
UTTypeConformsTo
com.apple.package
UTTypeDescription
UTM virtual machine
UTTypeIcons
UTTypeIconText
UTM
UTTypeIdentifier
com.utmapp.utm
UTTypeTagSpecification
public.filename-extension
utm
Thanks for this, it contains most of the information I need. Firstly, it looks like you are trying to start the ARM version of Windows 10 on your Intel Mac mini. Did you mean to do this or would you be happy with the x86_64 (Intel) version of Windows 10?
By using the ARM version, what happens technically is that UTM needs to do lots of work behind the scenes to emulate the ARM CPU on your Intel i3, which means the outcome is very slow performance. I suspect that the VM would eventually boot up if you waited for a long time.
To download Windows 10 Intel version, go to Microsoft.com.
The configuration is not right. Please try these VM templates, click here for Windows 10 and here for Ubuntu.
@conath is this windows template supposed to work on MacOS as well ? when i tried it stops at the prompt without booting the attached installation win10 iso. The iso i tried is x86 and i'm trying on a macos x86 version.
And the source for these templates are (the host) is it something you maintain is it ? just wondering the the safety :).
@conath is this windows template supposed to work on MacOS as well ? when i tried it stops at the prompt without booting the attached installation win10 iso. The iso i tried is x86 and i'm trying on a macos x86 version.
Yes, it's supposed to work on Intel Mac host. On my Mac, I can download that template, double click and then choose the ISO for the DVD drive. When I start the VM after that, it shows a couple of messages and then Windows starts with "Press any key to boot from CD/DVD". You need to press a key (e. g. space) to start booting the installer.
If yours gets stuck at EFI shell, you can try to press esc at the UTM logo screen and then choose Boot Manager, then select UEFI QEMU DVD-ROM to boot from the ISO. (you will still be prompted to press any key to boot)
And the source for these templates are (the host) is it something you maintain is it ? just wondering the the safety :).
Chrisp.cafe is maintained by me.
@conath Thanks, I tried your template, but it didn't work out. I also downloaded the virtio storage drivers and loaded them into the installer, but the installer always crashes with an error which says something about not equal. I think I saw the same message in one of the issues here.
Then i tried installing qemu through brew and use the following commands in https://superuser.com/questions/905815/how-to-setup-windows-7-in-qemu-on-mac and it worked for windows 10 as well. I tried additionally passing -accel hvf and the installation went just fine.
Is there any reason why qemu under UTM didn't work?
|
GITHUB_ARCHIVE
|
Functional vs Object-Oriented vs Procedural Programming
Ok, so first this article is a copy from below links. As I also was asked “what is programming?” in an interview, and I did not answer it properly/correctly. I put it here to record my sucks/weak time in case to inspired myself.
During an interview this week I’ve been asked “what is the difference between Functional, Object-Oriented and Procedural Programming” and to be honest I did not really know how to answer to that question (and it was the first time I was hearing about Procedural Programming…). So today, I decided to do some research and (try to) understand what are the main differences.
Let’s start with some definitions:
“ Functional programming (FP) is a programming paradigm — a style of building the structure and elements of computer programs — that treats computation as the evaluation of mathematical functions and avoids changing-state and mutable data.” — Wikipedia
“ Object-oriented programming (OOP) is a programming paradigm based on the concept of “objects”, which may contain data, in the form of fields, often known as attributes; and code, in the form of procedures, often known as methods.” — Wikipedia
“ Procedural programming is a programming paradigm, derived from structured programming, based upon the concept of the procedure call. Procedures, also known as routines, subroutines, or functions, simply contain a series of computational steps to be carried out.” — Wikipedia
Ok, so that’s an introduction but let’s go in a little bit more details.
First, what is a programming paradigm? It is a style of programming, a way of thinking about software construction. A programming paradigm does not refer to a specific language but rather to a way to program, a methodology. Some languages make it easy to write in some paradigms but not others. For more details, see this Wikipedia article.
Now let’s dive into the three programming paradigms that interest us today:
Procedural programming (PP), also known as inline programming takes a top-down approach. It is about writing a list of instructions to tell the computer what to do step by step. It relies on procedures or routines.
Object-oriented programming (OOP) is about encapsulating data and behavior into objects. An OOP application will use a collection of objects which knows how to perform certain actions and how to interact with other elements of the application. For example an object could be a person. That person would have a name (that would be a property of the object), and would know how to walk (that would be a method). A method in OOP can be considered as a procedure in PP, but here it belongs to a specific object. Another important aspect of OOP are classes. A class can be considered as a blueprint for an object.
Functional programming (FP) is about passing data from function to function to function to get a result. In FP, functions are treated as data, meaning you can use them as parameters, return them, build functions from other functions, and build custom functions. Functions in FP have to be pure functions, they should avoid shared state, and side effects and data should be immutable. A pure function is a function that given the same type of input will always return the same output, it is not dependent on a local or global state. A shared state is a state that is shared between more than one function or more than one data-structure. So with shared state, in order to understand the effects of a function, you need to know all the details of every shared variable. It adds a lot of complexity and permits less modularity.
That’s it for today! Next time I (and hopefully you) am asked what is the difference between Functional, Object-Oriented and Procedural Programming, I will at least be able to explain the big lines of each.
If you have any additional explanations, or any comments please do not hesitate to add them below.
|
OPCFW_CODE
|
Our lab works on insect communication systems, with a focus on the katydid genus Neoconocephalus. Most projects fall into one of two overlapping research areas – neurobiology and evolution.
Neoconocephalus includes over 30 species distributed throughout temperate and tropical areas of the western hemisphere, about half of which live in North America or the Caribbean. Morphologically the species are highly similar and often difficult to distinguish, but each species is recognizable by the male call. Females use the spectral and temporal patterns of male calls to recognize and localize conspecific males. Calls vary in frequency, pulse rate, pulse pattern (e.g. single versus double pulses), and verse structure. This great diversity in male calls across species raises interesting questions about the evolution of communication systems and the role of call diversification in speciation. Given that a communication system can only function if the sender and receiver are matched (i.e. if the male produces a call that females are able to recognize), it is perhaps surprising that communication systems are so variable across species. We are interested in understanding how changes in senders and receivers transpire over evolutionary time, e.g. Do changes in the male call precede changes in the female preference, or vice versa? Why do closely related species tend to assess different parameters of the call (e.g. pulse rate versus pulse duration), rather than simply being tuned to different values of the same parameter? What are the selection pressures that lead to call synchronization in species that produce calls with verses? What role does the communication system play in speciation events? How do new call traits or new call preferences evolve?
Addressing evolutionary questions entails recording, analyzing, and describing male calls of each species, identifying the parameters of interest to females, and determining the phylogenetic relationships among species within the genus. We are focusing on the North American and Caribbean species and are making significant progress in answering many of these questions.
One task that every nervous system must perform is to separate relevant from irrelevant information. An example of this phenomenon in the auditory system is ‘the cocktail party effect,’ in which a person can listen to a single conversation in a crowded room, ignoring all other conversations within earshot until a highly relevant signal – such as one’s name – suddenly becomes salient out of the background. Katydids must perform a similar task when engaged in phonotaxis toward a potential mate while listening in the background for the echolocation cries of bats, which are an important predator of katydids. Auditory Stream Segregation is the separation of different types of acoustic input into separate ‘tracks’ according to relevance or information content. Our lab was the first to demonstrate Auditory Stream Segregation in an invertebrate, and in 2007, we received a new NSF-award to study how bat signals within a complex auditory scene are detected and processed at cellular and behavioral levels. We use a variety of methods (e.g. extra- and intracellular recordings, electrical stimulation, calcium imaging) to study how one interneuron segregates bat cries from the acoustic background to test our hypothesis that dendritic rather than presynaptic processes are involved. In psychoacoustic experiments, we test how masking influences the detection and processing of bat cries. In behavioral experiments, we test how the context (e.g. reproductive state, or presence versus absence of attractive males) changes bat avoidance responses by flying katydids. Defects in Auditory Stream Segregation and other processes involved in separating sensory information have been implicated in various human disorders, including schizophrenia. If we can understand how Auditory Stream Segregation occurs in the simple nervous system of an insect, we may gain a better understanding of how this process functions in the more complicated brains of vertebrates, including humans.
We are also embarking on a project to understand how temporal selectivity is generated in the nervous system. What are the neuronal differences between a species whose females are selective for a particular pulse rate, and a species whose females are selective for (or against) particular durations of inter-pulse intervals? We have been exploring a model in which temporal selectivity for pulse rate is generated by intrinsic resonances of the plasma membrane of neurons within the CNS. We propose that membrane potential oscillates with a frequency that corresponds to the preferred pulse rate of the male calls. Using psychoacoustic experiments, we have obtained behavioral evidence for resonance in three different katydid species representing a wide range of preferred pulse rates, and we have collaborated with electrical engineers to model the combinations of ion channels that could generate this resonance. Future work will be directed at 1) identifying the brain neurons that are involved in this process, and 2) determining how these membrane processes could generate selectivity for temporal properties other than pulse rate (e.g. pulse or interval duration).
|
OPCFW_CODE
|
OTR: add has_part links between autophagy and organelle disassembly
As discussed at autophagy call today. See https://github.com/geneontology/go-ontology/issues/12662: "Paola will make has part relations between the specific types of autophagy and the disassembly terms. On the conference call this morning we decided that this is the best way to proceed since it would account for a necessary step in the autophagy process, but would also account for cases where cellular components are recycled.".
@ukemi @marcfeuermann @RLovering @Pauldenny
I propose to add the following links, please review and let me know if you have any concern. In all cases below, I think we should use ends_with specifically, rather than the broader has_part, but let me know if you disagree:
[ ] ‘mitophagy’ (“The autophagic process in which mitochondria are delivered to the vacuole and degraded in response to changing cellular conditions.”) ends_with ‘mitochondrion disassembly’
[ ] ‘macromitophagy’ (“Degradation of a mitochondrion by macroautophagy.”) ends_with ‘mitochondrion disassembly’
[ ] ‘microautophagy of mitochondrion’ (“Degradation of a mitochondrion by lysosomal microautophagy.”) ends_with ‘mitochondrion disassembly’ (“The disaggregation of a mitochondrion into its constituent components.”)
[ ] ‘pexophagy’ (“The process in which peroxisomes are delivered to the vacuole and degraded in response to changing nutrient conditions.”) ends_with ‘peroxisome disassembly’
[ ] ‘macropexophagy’ (“Degradation of a peroxisome by macroautophagy.”) ends_with ‘peroxisome disassembly’
[ ] ‘micropexophagy’ (“Degradation of a peroxisome by lysosomal microautophagy.”) ends_with ‘peroxisome disassembly’
[ ] require creation of ‘peroxisome disassembly’
[ ] ‘nucleophagy’ (“A selective form of autophagy, by which damaged or non-essential parts of the nucleus, or even an entire nucleus is degraded.”) => can’t really add ends_with ‘nuclear part disassembly’, but we could do
[ ] ‘piecemeal microautophagy of nucleus’ (“Degradation of a cell nucleus by lysosomal microautophagy.”) ends_with ‘nucleus disassembly’
[ ] requires creation of ‘nucleus disassembly’
[ ] ‘glycophagy’ (“The autophagic process in which cellular glycogen is delivered to the vacuole and degraded in response to changing cellular conditions.”) ends_with ‘glycogen catabolic process’
[ ] ‘lipophagy’ (“The autophagic process in which lipid droplets are delivered to the vacuole and degraded in response to changing cellular conditions.”) ends_with ‘lipid particle disassembly’
[ ] requires creation of ‘lipid particle disassembly’
[ ] ‘reticulophagy’ (“The autophagic process in which parts of the endoplasmic reticulum are loaded into autophagosomes, delivered to the vacuole, and degraded in response to changing cellular conditions.”) ends_with ‘endoplasmic reticulum disassembly’
[ ] requires creation of ‘endoplasmic reticulum disassembly’
[ ] ‘ribophagy’ (“The process in which cells degrade mature ribosomes under conditions of starvation”) ends_with ‘ribosome disassembly’
Thanks.
I don't think ends with is the correct relation because after disassembly, the components are catabolized. I think it is safer to go with has_part.
@ukemi
Sure, has_part then. I'll implement - just leaving it open for a bit in case @marcfeuermann or @Pauldenny have any comment. (I think Ruth is on leave this week.)
I'm happy to leave these decisions to David and Paola
I agree with David - I think has_part makes more sense.
@ukemi
(and @marcfeuermann @RLovering @Pauldenny)
FYI, this is all done. The only case where I opted for ends_with, instead of has_part, was
‘glycophagy’ (“The autophagic process in which cellular glycogen is delivered to the vacuole and degraded in response to changing cellular conditions.”) ends_with ‘glycogen catabolic process’.
Awesome. This makes perfect sense.
|
GITHUB_ARCHIVE
|
I know this will come off silly I am sure but I was hoping someone could help explain some Windows Experience Scores to me. I made a rookie mistake when looking at newegg's deal of the day and bought a video-card that is lower than my current one. (luckily it was only $10)
What sucked me into it was that the video card already in my computer was a Zotac Geforce GT 220 with 512MB DDR2, and the new one was a Gigabyte GeForce 210 with 1GB of DDR3. Obviously here I attached to the more/better memory on the card. After installing the new card (I didn't know my mistake at this point) I re-ran the Windows Experience and it lowered my Desktop Performance for Windows Aero from a 5.2 to a 5.1. So I put the old card back but left the HDMI cord plugged into my motherboard, re-ran the score and now its at a 6.2.
In short I was curious which setup was best at the end of the day and should I even have any of these videocards plugged into my computer if they are apparently just lowering the score. Second part of that is how valid is that score to real world applications (I run games like SCII and Minecraft on this computer, as well as ArcGIS software for 2D and 3D mapping)
The hardware I am working with is
- Gigabyte Z68XP-UD3 Motherboard
- Intel Core i5-2500K
- Zotac GeForce GT220 (512MB DDR2)
- Gigabyte GeForce 210 (1GB DDR3)
Well just to bring up something, the Intel 3000 video you speak of is integrated into the CPU. which in my opinion the GeForce cards are better. I don't prefer onboard CPU graphics....but it helps to accelerate the other video cards
That is part of the reason I was kind of confused about the apparent drop in score between just being plugged into the Motherboard vs plugged into the card. I don't understand why that alone would change the index score by a full point. Does changing where I am plugged in change where the computer goes to test the graphic capabilities?
That hierarchy chart shows the 220 ddr2 model many tiers higher. I usually disable onboard graphics when using a discrete graphics card. But my guess is that yeah, when you plug in your monitor to the motherboard's connector you are using the onboard graphics. Look closer at the scores. The 220 should beat it in the 3d gaming category. Desktop/Aero performance should be irrelevant unless you notice an actual problem.
So at the end of the day I should probably leave it plugged into the video card then and ignore the WEI scores for this (oddly the 3d stayed the same either way...it was down around 5.5 to begin with, then after I installed the 210 card, realized my mistake and put my old 220 back the 3d jumped up to 6.3 and has stayed there no matter which place it is plugged into)
It sounds like you are getting three scores that are based on 3 GPUs (one of which is the HD3000 iGPU). The problem with WEI when you have two GPUs — the iGPU (HD3000) and a dGPU (the 210 or 220) — is figuring out WHICH GPU the WEI score is related to. Is it testing the iGPU, or is it testing the dGPU?
In My laptop I have the iGPU (HD3000) and a 540M dGPU. If the application is a 3D app then the 540 dGPU is used, if the application is a 2 D app then the HD3000 iGPU is used. Windows during it's test may test the HD3000 even though a dGPU is installed.
I think it depends on the settings in windows.
Try your Game and see if there is a difference. NOTE: if you plug into the HDMI output from the MB, you are using the iGPU.
|
OPCFW_CODE
|
If you are a front-end developer that has dabbled a little bit in node and like it, you probably need to build a list of tools (npm modules etc) you can use in your day-to-day development.
Here’s a small list of things I find I often reuse in different projects.
anything in a require() is an npm module, installable via npm install name
Working with files
- require(‘fs’); – obvious but the default node fs is useful
- require(‘fs-extra’); – an extended file system util that lets you do a lot more, like make files and directories and make files in paths that don’t exist yet, work with JSON etc.
- require(‘watch’); – an awesome file system watch tool you can configure to fire events on file change, creation etc. with various patterns and callbacks.
Headless browsers and DOM
Being in node does not mean you cannot work on a DOM or on a DOM-like abstraction to either open HTML documents, parse them etc.
- require(‘cheerio’); – a light pseudo-DOM implementation that can load a document and allow you to use a jQueryLite-like version to query and manipulate it. For those that have given up on jsdom.
- PhantomJS – An actual headless implementation of webkit. PhantomJS is very useful for testing and CI. Powers frameworks like CapserJS
Working with Class
Goes without saying but. It’s much nicer to use a more classical OOP interface in your nodejs modules.
- require(‘mootools’); – mootools-server, comes with Class (global) as well as the mixins Events, Options and some prototype enhancements to String etc. As you’d expect. No return value, just
require(‘mootools’)then use as per client.
- require(‘prime’); – Kamicane and MooTools’ next gen micro lib that does OOP style classes but is more lightweight. Don’t forget to also install Arian’s prime-utils for some sugar like supers, setOptions etc.
- require(‘primish’); – My own version of prime, which does a lot on top of prime to make it nicer and easier to use in the browser, as well as sugar around emitter, supers and options
Working with CLI
Working on build tools often requires you to do several things: process arguments and format stuff nicely.
- require(‘clintish’); – Clintish is a fork of
clint– also by Kamicane, it’s a micro processor for arguments with events and parsers, helper syntax etc. Very nifty.
- require(‘colors’); – a very clever implementation that prototypes the String native with some getters for default colour names. Allows you to do stuff like
console.log(“hello”.red + ” there”.blue);, no return value – just require once.
In the next part, I will cover a lot of tools like growl notifiers, build tools / minifiers, linters, (trans)compilers, HTTP / socket servers, streaming and so forth.
|
OPCFW_CODE
|
get_independent_GMM_with_shift_2 - Finds a Gaussian mixture model (GMM) for data possibly containing discrete
Example compile flags (system dependent):
-DLINUX_X86_64 -DLINUX_X86_64_OPTERON -DGNU_COMPILER
-lKJB -lfftw3 -lgsl -lgslcblas -ljpeg -lSVM -lstdc++ -lpthread -lSLATEC -lg2c -lacml -lacml_mv -lblas -lg2c -lncursesw
const Matrix *feature_mp,
const Vector *initial_delta_vp,
const Vector *initial_a_vp,
const Matrix *initial_u_mp,
const Matrix *initial_var_mp,
random global shifts in feature dimensions.
This routine finds a Gaussian mixture model (GMM) for the data on the
assumption that the features are independent. It allows for the possibility
of a data point being shifted by a random discrete amount after having been
generated from its Gaussian. The shifts are assumed to be independent of the
Gaussians from which the data points are generated. Unlike the counterpart
routine, the shifts are not necessarily assumed to occur with wrap-arounds.
Instead, the shifts could result in any arbitrary values into the feature
dimensions that free up due to the shift. The model is fit with EM. Some
features are controlled via the set facility.
This routine performs subspace clustering in that it considers only a subset
of feature dimensions that are guaranteed to be not prone to corruption by
noise due to the assumed nature of shift. The subspace of feature dimensions
is determined by the max_left_shift and max_right_shift parameters as
In particular, it fits:
p(x) = sum sum a-sub-i * delta-sub-j * g(u-sub-i, v-sub-i, x(- s-sub-j))
where a-sub-i is the prior probability for the mixture component (cluster),
u-sub-i is the mean vector for component i, v-sub-i is the variance for the
component, and g(u,v,x) is a Gaussian with diagonal covariance (i.e., the
features are assumed to be independent, given the cluster). delta-sub-j is
the prior probability of shift j and x(- s-sub-j) indicates a global reverse
(negative sign) shift of x by the amount corresponding to s-sub-j.
max_left_shift and max_right_shift specify the maximum amount of global
discrete random left and right shift respectively a data point can experience
after being generated from its Gaussian. Unlike the counterpart routine, each
of these parameters can have only non-negative values. The total number of
possible shifts for any data point is S = (max_left_shift + max_right_shift + 1)
including the zero shift.
Based on max_left_shift and max_right_shift, a subspace of the entire
feature space exists that is guaranteed to be unaffected by the arbitrary
noise that a random shift introduces. It is of dimension
T = M - (max_left_shift + max_right_shift), where M is the dimensionality of
the full feature space. So, the EM procedure determines clusters in this
subspace rather than the full space.
The argument num_clusters is the number of requested mixture components
The data matrix feature_mp is an N by M matrix where N is the number of data
points, and M is the number of features.
The model parameters are put into *delta_vpp, *a_vpp, *u_mpp, and *var_mpp. Any of
delta_vpp, a_vpp, u_mpp, or var_mpp is NULL if that value is not needed.
The vector *delta_vpp contains the inferred probability distribution over
shifts computed using all the training data points. It is of size S. The
elements of *delta_vpp can be viewed as shift priors. The assumed order of
shifts in this vector or any other output pertaining to shifts is:
(max_left_shift, max_left_shift-1,...., 0,...., max_right_shift-1, max_right_shift)
The vector *a_vpp contains the inferred cluster priors. It is of size K.
Both u-sub-i and v-sub-i are vectors, and they are put into the i'th row of
*u_mpp and *var_mpp, respectively. The matrices are thus K by T.
If P_cluster_mpp, is not NULL, then the soft clustering (cluster membership) for each
data point is returned. In that case, *P_cluster_mpp will be N by K.
If P_shift_mpp is not NULL, then the posterior probability distribution over
the possible discrete shifts for each data point is returned. In that case,
*P_shift_mpp will be N by S.
Initial values of the parameters to be used as the starting values for the EM
iterations can be specified using initial_delta_vp, initial_a_vp,
initial_u_mp and initial_var_mp. If they are all NULL, then a
random initialization scheme is used. It is assumed that the initial
parameters are specified either in the full feature space or the reduced
space in which the final clusters are sought. In case of full space, the
routine retrieves the parameters corresponding to the target subspace.
If the routine fails (due to storage allocation), then ERROR is returned
with an error message being set. Otherwise NO_ERROR is returned.
This software is not adequately tested. It is recommended that
results are checked independently where appropriate.
Prasad Gabbur, Kobus Barnard.
|
OPCFW_CODE
|
NOT white and gold. It's obvious.
I'm currently working on some major interface improvements to the Ars Linguarum page. Instead of a barebones button + textarea combo, it's going to have some overdone Bootstrap UI (~classic web developer~ amirite?).
I've been working on cleaning up Ars Linguarum. The most recent backup (made a minute or so ago) is very messy. Nouns, adjectives, pronouns and verbs are all represented pretty differently. Some use a plain old constructor, one uses a static .parse() method, and the other returns a function that just calls a static allForms function from ArsLinguarum.verbs.conjugation with arguments from the parsed text. It's a horrible mess. So I've been working on remedying this. I'm currently working on the verbs to make them a bit nicer.
Update Feb 24 1:07:44: The cleanup is finally done (until I find something else to clean, that is). T_T
Ars Linguarum's endgame is to be the foundation of a planned project. I haven't named it yet, but it's going to automatically generate a story. I plan to manually do part-of-speech tagging on (and other analysis of) some stories, including my own Modificatio Aeneidis, and write a program to generate characters, settings and plots to produce an entire story. It's perhaps a bit ambitious (not sure how I'm going to do the plot-generation part), but it can't hurt to try.
5:04:36 AM: Vocabitur Artem Fabularum.
I added pronoun support to Ars Linguarum. It can handle tricky ones like quis (common + neuter in the singular, masc + fem + neuter in the plural).
I added more verb conjugations to Ars Linguarum (1st, 2nd, 3rd and 4th deponent + 3rd, 3.5th and 4th). Fun. I plan to add guessing support to verbs. Principal parts unambiguously provide all necessary information to fully conjugate a regular verb, so it should be easy.
I've been working on this thing that inflects Latin words. Its name (probably subject to change) is Ars Linguarum. It's pretty cool, so you should check it out. A pretty cool feature is that it's good at guessing missing parts of noun entries. Examples:
n|rēs, it'll deduce that it's a feminine 5th declension noun with genitive
n|navis|i, it'll deduce that it's a feminine 3rd declension noun with genitive
n|castra|pl, it'll deduce that it's a neuter 2nd declension noun with genitive
It autofills verb forms for regular verbs (though 2nd and 3rd conjugation require multiple principal parts). For 1st conjugation, it only requires the first principal part!
Previously, it guessed that
laus,laudis:f is 2nd declension (as though it were
laus,laī:m!) and pretty much discarded its genitive, but I fixed that by making it check the genitive to make sure it also looks like it's 2nd declension.
Greetings! I made some awful blog thing. It's absolutely terrible. Sorry.
|
OPCFW_CODE
|
Success Factors for Agile Delivery in the Federal Government
Paul Gorans (IBM Global Business Services) and Philippe Kruchten (University of British Columbia) published A Guide to Critical Success Factors in Agile Delivery. This guide discusses the values, benefits and challenges of agile and proposes critical success factors for implementing agile delivery in the federal government.
InfoQ interviewed Paul about implementing agile practices, how agile impacts acquisition and procurement, scaling agile communication and the usage of reviews in agile.
InfoQ: What made you decide to write this guide on implementing agile delivery. To whom is it targeted?
Paul: Our IBM Center for the Business of Government had heard about the success of one of our IBM Agile programs with a U.S. Federal Agency that I had helped transition to an Agile approach, and the formal implementation of my Agile Competency in IBM Global Business Services, Federal. They recommended that I write a guide to help other Agencies understand some of the Agile basics, and the critical success factors that I feel are required for Agile delivery, and address a few U.S. Federal specific aspects that we are often asked about (e.g. how do you conduct 508 testing in Agile?).
InfoQ: What in your own words does it take for organizations to effectively implement agile practices?
Paul: Executive sponsorship from the business/mission and CIO. If you have that, then you have the ability to address all factors required for Agile success, including the ten that we included in our guide.
InfoQ: Changing the acquisition process is one of the success factors in the guide. What makes this so important for agile delivery?
Paul: In both commercial or federal entities, I have seen acquisitions ask for "Agile", while asking for traditional IT phases and gates, or including measures of evaluating vendors or performance exclusively on low cost as opposed to value delivered. That signals to me that the term may have been written into the proposal at the last minute at the request of a stakeholder, but all parties may not yet be on the same page. It makes potential partners/vendors unsure of what the client is asking for, vary the responses, and make it harder to evaluate proposals. For any acquisition (regardless of the approach taken), procurement should be on the same page with what the business wants (often a solution to a business problem that includes IT), the leading practices that best help them to achieve that goal, and what is required by all parties involved for success.
InfoQ: The agile delivery guide suggests to implement more verbal communication and dashboards. Is this something that can be scaled? Can you give examples how this can be done in larger organizations?
Paul: Yes. Both verbal communication and dashboards can be scaled. On projects with many agile teams, a communication plan should be developed that is fit to the program (note that I often relay the fact that traditional project management practices are still relevant in Agile, but PMs (or Scrum Masters) need to rethink the level of detail they are written at and how they are conducted). The verbal components of that are the daily standup meeting on each agile team, and a cross team (e.g. Scrum of Scrums) after the Agile team stand-up meetings within a program, to raise awareness of impediments and seek support for impediments that impact all teams.
On our IBM projects, I have implemented an IBM-only daily standup/status that allows our Agile project leads to directly communicate with our Project Executive (PE) and key delivery managers. It assures that the Project Executive is seldom surprised by anything going on, and affords the PE an opportunity to directly interact and mentor many junior project managers for a bit on a daily basis, and provides he or she information for them to communicate with their executive level clients or partners. Using other follow-up, written emails and one page status should be fit to purpose in support of those verbal communications as well.
Regarding dashboards, wikis can work for some communications; however, there are now many Agile tools or suites of tools that scale to multiple Agile teams, programs and organizations. This allows both developers and executives to see the same detailed or aggregated information as soon as it is available from one repository. Stories, build statistics, impediments, and burn-down charts can be seen from any desktop, regardless of where the various teams and stakeholders are physically located.
One constraint to scaling tooling for end to end visibility is that multiple functional areas within an organization are often responsible for acquiring and managing tooling for their traditional function (e.g. requirements, testing, configuration management, deployment). That again requires executive support to work across business and IT groups to define a solution that provides end to end value.
InfoQ: An agile way of working asks for different kinds of reviews as the guide mentions. Can you elaborate on that?
Paul: The review of a story by the product owner as it is completed, or a demonstration of a group of stories by a broader set of stakeholders at a Sprint review are reviews (of a working product). An Agile approach, even for a large project that is decomposed into many Agile teams, and lean support teams, still provides an opportunity for other reviews, but to be efficient those reviews should happen more iteratively. For example, this may require that staff responsible for standards to review design decisions, or code (including output from automated code quality tooling) as it is produced for each release, iteration or story. That requires stakeholders to change how their staff works on a daily basis to support the needs of Agile teams (daily interaction with teams in review/feedback cycles as opposed to checkpoints that could result in costly refactoring).
On larger projects, I also recommend reviewing the prioritized capabilities that are planned for a release period (fixed time, fixed resources) with the executive stakeholders. Far too often, teams start off conducting Agile delivery, without setting some expectation of what can be delivered by a given end date, or without doing some high level estimating first. Then, at that point, they may be way off, and may not have planned for enough Agile capacity to support the executive stakeholder needs and commitments. There are techniques that we have used for years to conduct "just enough" estimating up front as opposed to waiting to estimate after generating a sample velocity (that validation is still critical after a couple of Sprints). This provides stakeholders some indication about what can be delivered, and helps to earn the trust of stakeholders that may not be convinced of the value of an Agile delivery approach.
InfoQ: How can organizations use this guide when that want to adopt an agile way of working or improve the results of an ongoing agile adoption?
Paul: I recommend that they use the guide as they either consider implementing an Agile approach for the first time, or as a quick evaluation of their current implementation of Agile. They then should write down the gaps that they believe they have and develop plans to address them. Then, for gaps that they don't believe they can address themselves, they should secure the assistance of a partner with capabilities commensurate with their need.
|
OPCFW_CODE
|
Fabulousfiction – Chapter 1242 Sorokin’s Bizarre Adventure 3 ducks volleyball read-p1
Thriven and throfiction The Legendary Mechanic novel – Chapter 1242 Sorokin’s Bizarre Adventure 3 tramp furry recommendation-p1
Novel–The Legendary Mechanic–The Legendary Mechanic
handbook of embroidery
Chapter 1242 Sorokin’s Bizarre Adventure 3 comfortable sheet
He seen everyone’s concept, paused, and continuing. “On the other hand, you men can’t stop me from leaking your data now, and I’m also anxious you may take steps with me. The two of us have worries. Hence, cooperating is the greatest selection for all of us. If we can peacefully arrive at an understanding, we can easily stay clear of increasing the circumstance that’ll do neither people a bit of good.”
Ecology of the Opossum on a Natural Area in Northeastern Kansas
d.a.m.n, if not because we designed connection with Dark colored Superstar just before, we might have really thought with your nonsense.
He had eyes over the magic formula of reviving over the Sanctums far too, so he was prepared to spend an increased price to take part in the Holy Accord. If he could also enjoy this freedom, his lifestyle would be guaranteed.
“How want to come together?” Oathkeeper narrowed his eyes.
Oathkeeper identified whatever had taken place without leaving any aspects.
Experiencing this, Sorokin extra, “Honestly, I was just preparing to relocate. If your Holy Accord is happy to recognize me, I won’t are available bare given. You folks should know that I’m the proprietor from the Unlimited Fiscal Crew. I will take Holy Accord a lot of cash. You’ll demand dollars whatever you’re setting up, correct?”
He did not need to uncover the information of his other ident.i.ties. The ident.i.ties of Heart and soul Emperor and Sorokin were still regarded as clear, but he acquired completed several not-so-thoroughly clean things in reference to his other ident.i.ties. If he provided this data, it will come to be new take advantage of. Hence, he declined to mention what ident.i.ties he used just before, revealing his firmness to your appropriate point, reminding the exact opposite special event that the placements have been the same on this negotiation.
“All right, I go along with your conditions. I’m willing to stay within your management temporarily, nevertheless, you males can’t secretly build traps to secure soul strength. Though that doesn’t do the job significantly against me, when you people present warning signs of executing it, I’ll see it as you may males simply being insincere about it partners.h.i.+p. I’ll consider our arrangement invalid and immediately have my manifestation leak your solution.”
He failed to would like to reveal the knowledge of his other ident.i.ties. The ident.i.ties of Spirit Emperor and Sorokin were regarded nice and clean, but he possessed completed a lot of not-so-clear points together with his other ident.i.ties. If he shared this data, it might grow to be new take advantage of. For this reason, he refused to convey what ident.i.ties he utilized right before, revealing his firmness to the suitable extent, reminding the contrary special event that the positions have been equal in this particular negotiation.
Han Xiao smiled faintly. A Label of the Underworld on the rear of his palm flickered marginally.
Sorokin smiled. “Uncomplicated. You guys are concerned about me leaky the key, therefore you don’t believe me ample to allow me roam exterior. For that reason, you might on top of that let me be part of the Sacred Accord and create me one among you. Then, isn’t the problem solved?”
“He figured out my ident.i.ty and arrived at blackmail me. I made the choice to never escalate the challenge, accepted his ask for, and partnered with him in organizations at a loss, looking to pack his appet.i.te so he won’t uncover me. But that *sshole is just too big very much. He got my funds but still wanted to show my ident.i.ty, plotting against me behind my back again. Should I don’t coach him a lesson, he’s really about to assume I can be easily bullied!”
“I never experienced the chance to secretly invasion him. Now, on the other hand, you fellas have helped me to capture the prey as well as indirectly made a key rendering floor. Also, Sorokin has not a clue that this power they have can also be productive on me. It only takes for individuals to use a demonstrate, and also the problem will probably be sorted out. Because of this , for the partners.h.i.+p to really exist, basically…”
Others ended up stunned. The expressions on their own facial looks has become unusual.
“That’s ideal. I don’t want three of the Standard Cultures to concentrate on me. He pressured me to give up the group I’ve used a long time creating,” Sorokin mentioned that has a hateful overall tone.
Sorokin obediently allow them to consider him aside. He discreetly cleaned out his frosty perspire, sensed this expertise was much like a wish and was thankful.
Oathkeeper suppressed the shock as part of his center, behaved puzzled, and reported, “Dark Celebrity? I am aware this gentleman. He only rose in the past couple of decades and appear to be amongst the best on the universe. It’s stated that his total power is unfathomable. How did you enter problems with him?”
Once the other individuals proceeded to go away, Oathkeeper was reduced as well. He was also preparing to keep Sorokin secure first.
Obtaining thought of that, Oathkeeper sealed his eye and secretly applied his soul projection to make contact with Han Xiao.
“That’s ideal. We’re all through the ancient time. The disputes we acquired during the past have dissipated after a while. We’re not adversaries now. Now was only a uncertainty. My purpose is certainly just Spirit Flames, having said that i coincidentally found out your secret. The greatest concern between us is that you simply people don’t believe me. You’re concerned which i, an outsider, will drip your information, so you should take me…”
“How do you need to come together?” Oathkeeper narrowed his eyeballs.
“Black color Celebrity!”
“Just earlier, Sorokin located us…”
d.a.m.n, or even because we manufactured exposure to Dark-colored Legend prior to, we might have really believed as part of your nonsense.
While doing so, Sorokin inevitably noticed bitter. Initially, he idea most Primordial Models possessed vanished, and he was the only one who experienced stayed alive till now. However he never described it, he actually obtained feelings of efficiency in their coronary heart, believing that he was the only person who obtained the final laugh among all the Primordial Models within the very same creation.
“The good thing is, I’m genuine adequate to earn a chance…”
Sorokin easily explained, “That won’t take place. Dark-colored Superstar is not very enthusiastic about me personally he just prefers my hard earned cash and a.s.packages. You men don’t know, but that man is endlessly greedy, domineering, ambitious, slippery, unethical, and intensely shameless… His target should be to take over the Unrestricted Economic Group’s a.s.pieces.”
“I never got the opportunity secretly infiltration him. Now, nevertheless, you people have helped me to capture the prey as well as indirectly developed a key rendering floor. Also, Sorokin has no clue that the take advantage of he has is additionally efficient on me. It only takes for many people to use a demonstrate, and the issue will likely be remedied. Because of this , for the partners.h.i.+p to really exist, basically…”
After the very first distress, Sorokin experienced a powerful involvement in the Sanctums. Yet another of his targets would be to lower the amount of Beyond Quality As in the world. Given that he had found that Beyond Quality As can be revived, he would not be able to sleeping and eat well before he determined the way transpired.
Listening to this, Oathkeeper stated, “The chance of making an outsider join us is absolutely not controllable. You hid your toughness very well, and we also don’t know significantly in regards to you. It’s very hard for us to have faith in you.”
Naturally, Sorokin and Black color Celebrity got clashes, and Oathkeeper was more on Han Xiao’s side in their heart and soul. To him, the Black Legend Army had been a more significant mate.
Sorokin was disappointed within. He was already intending to call off this course of action, but to his shock Holy Accord actively wanted to push him to concentrate on Black color Star rather.
Seeing this, Sorokin added in, “Seriously, I became just likely to relocate. In the event the Sacred Accord is pleased to acknowledge me, I won’t can come empty given. You males should recognize that I’m the dog owner of the Endless Financial Team. I can bring Sacred Accord plenty of resources. You’ll need income whatever you’re arranging, perfect?”
Sorokin wished to cuss however maintained a grin.
His hostility toward Han Xiao was extremely solid. Earlier on, he even contemplated frightening to leak their magic formula for making the Sacred Accord firm support him wipe out Black Star. With your an surprising and overwhelmingly sturdy drive, he definitely could get Black colored Superstar by big surprise.
When compared to the secret with the Sanctums, working with Black colored Star failed to appear to be that important. Hence, Sorokin failed to would like to eliminate the opportunity for discovering concerning the Sanctum as a result of anything less considerable. Furthermore, he possessed already figured out a idea from what happened today—since he chose to become coward, he must be a total coward!
In fact, Sorokin and Black Celebrity acquired situations, and Oathkeeper was more about Han Xiao’s facet on his coronary heart. To him, the Black Star Army had been a more valuable lover.
|
OPCFW_CODE
|
This pages lists the BSc. Theses topics currently available in our group. Don't hesitate to contact the respective person if you are interested in one of the topics. If you would like to write a thesis about your own idea you can propose it to the person most related to what you plan to do or you can contact Prof. Bernstein directly.
Statistical Validity in Science
Many prescribed medical pills depend on effects of certain Proteins. There are millions of ways organic molecules can be rearranged in Proteins, which is reflected in the wealth of different effects these Proteins produce in our bodies. Many of these effects are discovered through academic research and published in research papers in scientific journals or presented at conferences, which entails, that findings are evaluated in some way. Often, they are evaluated using statistics, or more specifically, Null-Hypothesis-Testing (NHST).
NHST actually has many drawbacks and is often hard to get right. For example, many of these tests (e.g. ANOVA, t-test and friends) rely on the underlying data fulfilling certain prerequisites. ANOVA, for example, requires the data to have equal variances. In a forthcoming study, we found that these assumptions are rarely checked/reported (~13% for a major scientific conference). Seeing this, we now wonder, what else may have gone wrong with the statistical validity of research papers. To this end, we plan on expanding and using our internally developed system based on Scala/Play - although, likely little code will need to be written. Instead, you'll plan a larger experiment with us, involving a couple of distinct international professors. Upon successful experimental design, follows its execution, where you (and your tool) will coordinate thousands of crowd workers towards assessing the statistical validity of research papers. Ultimately, we'll analyse our results and will try to draw an inference about some core fields of science. Such a finding might have a positive impact on many.
If you're interested, please contact firstname.lastname@example.org
Last update: October 4th 2016
Increasing the number of open data streams on the Web
The open data movement is increasing the number of available datasets on the Web. However, additional effort is required to make the data more accessible. While methods and best practices for static data have been developed during the past years, little attention has been dedicated to dynamic and real-time data. One of the differences of this kind of data is that it can be served through push-based mechanisms i.e. streaming APIs, in addition to common pull-based mechanisms i.e. Web APIs.
In this project, we want to increase the amount of real-time, streaming open data available on the Web. The main steps can be roughly summarised as follows:
1) identification of relevant datasets to be exposed through streaming APIs, e.g. data sets from here, here, or obtained by the city of Zurich (we are currently exploring such a use of datasets).
2) publication of the identified datasets through streaming API, by using and extending the TripleWave framework.
3) design and implementation of a proof of concept on the top of this dataset to show a potential use of the API.
If you are interested, please contact: email@example.com
Publication date: 16.11.2016
SPARQL query evaluation in Big Data processors
RDF is a framework to describe resources and relations among them as graphs. The query language to express and compose queries over RDF models is SPARQL. The RDF Stream Processing initiative has started investigating how SPARQL can be extended to query RDF streams, i.e. flows of time-stamped RDF data. As a result, several engines to continuously evaluate queries over RDF streams have been designed and implemented, e.g. C-SPARQL and CQELS, but these solutions suffer from scalability and performance issues.
In parallel, Big Data processors, e.g. Spark and Heron have been released. These systems can process data streams and to distribute the computation over several nodes, achieving performance not possible in pre-existing solutions.
The goal of this project to investigate if and how much Big Data processors covers the features offered by SPARQL. In the first part of the project, a use-case will be designed, by selecting a data stream and the operations to be performed on it. Next, the selected Big Data processors will be deployed, and the use-case operations will be implemented on the top of them. The project will end with a qualitative and quantitative comparison of the results obtained with the different processors.
If you are interested, please contact: firstname.lastname@example.org
Publication date: 16.11.2016
|
OPCFW_CODE
|
The general workflow is to import JPG images and set the keyframes for panning or zooming motion. Optionally, deflicker the images. Then export as a completed timelapse.
Meanwhile, RAWBlend is used before
importing into Panolapse, for processing RAW files from the camera.
- Download and unzip Panolapse.
- For Windows users, make sure to unzip the entire "Panolapse" folder (otherwise you may get
an error message about updating Adobe Air).
- Move "Panolapse" anywhere you want on your computer.
Open it and double-click on the program to start.
- Simply delete the Panolapse folder.
Panolapse Tutorial (animating a perspective-correct pan):
Tips for maximum quality:
- First, if you haven't already, check out the main video on the
homepage for a demo.
- Open Panolapse and click Import image sequence....
Select a numbered image sequence (IMG001.JPG, IMG002.JPG, IMG003.JPG, etc).
- In the Lens Settings dialog, confirm the focal length, crop factor, and lens type.
For the Output Lens Type, select Normal or Fisheye. The fisheye perspective is most apparent with photos originally shot on wide angle lenses or fisheyes.
- After images are loaded, left-click to pan/tilt, right-click (or hold CTRL) to roll, and mouse-wheel to zoom.
(On a Mac, you can use the CMD key in place of CTRL, and zoom by using the trackpad.)
- Adjust the camera angle for the start keyframe of your animation.
- Move the animation-slider to the end and adjust the camera for the ending keyframe.
- Drag the animation-slider to preview the interpolated frames.
- (Optionally) To animate zooming, check the Enable zoom animation box and set your start and end keyframes.
- Click Export Frames.
- Deflicker. If you wish to smoothen out scene brightness,
tick the checkbox Deflicker.
- Panolapse can smoothen out a scene by
adjusting each frame's brightness towards a moving average brightness defined by Rolling Window Size.
Between 8-25 is a good general amount.
- Optionally, you can specify the Sample Area, which is the
region in the scene
that Panolapse will analyze for flickering.
The program smoothens out the scene brightness
based upon the changing brightness in this specified area.
- Choose your File Format. For highest quality, export as max-resolution
JPG. Panolapse can also combine your frames into a video as .mp4 using the x264 codec
(good for Youtube/Vimeo uploads), or .mov using the PhotoJPEG codec (bigger filesizes).
All presets are high quality and considered visually-lossless.
- Click Queue for Render. Optionally, you can now do another sequence.
- When you're ready to render the batch, click Render all.
- Do not crop images before they are handled by Panolapse.
- Import and export in maximum resolution.
- I want to pan just a single image, how do I do that?
Click Tools Menu->Animate a single image... You'll be able to enter the number of frames
you want to animate the image across.
- How do I animate zooming in and out?
Click the Enable Zoom Animations checkbox to enable keyframing of the zoom level at your start and end frames.
Alternatively, you can use Panolapse just to do the rotation/panning motion.
Then export the frames in maximum resolution and do a digital zoom with any video-editing software
of your choice.
- Why does the preview appear blurry?
You can adjust the preview quality in File Menu -> Preferences.
Check the Optimal Size indicator in the bottom-left corner for the optimal output resolution.
- My computer slows down when exporting. How can I tweak performance?
Check out the Processor Threads setting in the Preferences.
The default is 3, which is a general case for dual-core or quad-core machines.
If your computer acts sluggish while rendering, try decreasing it.
If you have a very fast computer, increase the threads.
- I can't select more than 1000 files
Try placing the files in a folder and select the first image to import.
Panolapse will automatically detect the sequence and import all of the images.
This method can be used to bypass the 1000 file limit restriction of some operating systems.
- What is "fast-copy" mode?
Panolapse has a speed optimization where it skips directly to
deflickering and video creation.
This can be useful if you wish to only deflicker a scene or make a video file.
Simply import the files as usual and then export them again - Panolapse
will detect no changes are being made and will bypass the re-rendering step.
- (Windows) I'm having trouble opening files.
Try right-clicking on Panolapse.exe and "Run as Administrator," or
check your system security settings. Try opening one file at a time
to narrow down the problematic file.
- (Mac OSX) I can't open the program because it is made by an "unknown developer"?
Check your Security & Privacy settings to allow apps downloaded from anywhere. More information here.
- (Mac OSX) I can't see any text
This may be caused by duplicate fonts in your system (for instance by installing MSOffice). Open Font Book in the Applications Folder, select "All Fonts" in the Edit menu, and click "Look for Enabled Duplicates" (cmd-L). Delete the duplicates, empty the trash, and then open Disk Utility and click "Repair All Permissions." Something else to try is to open "Font Book" and click "Restore Standard Fonts" in the File menu.
Timelapse Tutorial and Guide
Please feel free to email firstname.lastname@example.org
|
OPCFW_CODE
|
Make isolinux 4.0.3 chainload itself
I have a bootable iso which boots into isolinux 4.0.3 and I want to make it chainload itself (my actual goal is to chainload isolinux.bin v4.0.1-debian, which should start up the Ubuntu10.10 Live CD, but for now I just want to make it chainload itself).
I can't get isolinux to chainload any isolinux.bin, no matter what version. It either freezes or shows a "checksum error" message.
I'm using VMWare to test the iso.
Things I have tried:
.com32 /boot/isolinux/chain.c32 /boot/isolinux/isolinux-debug.bin (chainload self)
this shows
Loading the boot file...
Booting...
ISOLINUX 4.03 2010-10-22 Copyright (C) 1994-2010 H. Peter Anvin et al
isolinux: Starting up, DL = 9F
isolinux: Loaded spec packet OK, drive = 9F
isolinux: Main image LBA = 53F00100
...and the machine freezes.
Then I've tried this
(chainload GRUB4DOS 0.4.5b)
chainloader /boot/isolinux/isolinux-debug.bin
Result:
Error 13: Invalid or unsupported executable format
Next try:
(chainload GRUB4DOS 0.4.5b)
chainloader --force /boot/isolinux/isolinux-debug.bin
boot
Result:
ISOLINUX 4.03 2010-10-22 Copyright (C) 1994-2010 H. Peter Anvin et al
isolinux: Starting up, DL = 9F
isolinux: Loaded spec packet OK, drive = 9F
isolinux: No boot info table, assuming single session disk...
isolinux: Spec packet missing LBA information, trying to wing it...
isolinux: Main image LBA = 00000686
isolinux: Image checksum error, sorry...
Boot failed: press a key to retry...
I have tried other things, but all of them failed miserably.
Any suggestions?
You will need chain.c32:
LABEL chain_isolinux
COM32 /boot/isolinux/chain.c32
APPEND isolinux=/boot/isolinux/isol401.bin
http://syslinux.zytor.com/wiki/index.php/Comboot/chain.c32
You also need to make your ISO properly. You need the -boot-info-table option.
mkisofs -o output.iso -b boot/isolinux/isolinux.bin -c boot/isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table CD_root
You can add additional parameters to mkisofs too (like adding support for Rock Ridge, ...).
|
STACK_EXCHANGE
|
scipy.stats.bootstrap broke with statistic returning multiple values
The issue appeared upgrading from scipy-1.9.3 to scipy-1.10.0
We are using scipy.stats.bootstrap with a vectorized statistic that returns multiple values, i.e. computes multiple statistics at once.
Consider the following example:
import numpy as np
from scipy.stats import bootstrap
def statistic_ccc(cm: np.ndarray, axis: int) -> np.ndarray:
tp, tn, fp, fn = cm
actual = tp + fp
expected = tp + fn
ret = np.nan_to_num(
concordance_array(actual, expected, axis=axis), copy=False, nan=0.0
)
# This is intended to return multiple statistics, this just inserts an extra
# dimension. With scipy-1.9.3, stacking along the zeroth dimension worked.
# However, due to the changes in PR 16455, this no longer works.
return ret[None, ...]
def concordance_array(x: np.ndarray, y: np.ndarray, axis: int) -> np.ndarray:
assert x.shape == y.shape
assert axis == -1
if x.ndim == 1:
return np.asarray(concordance(x, y), dtype=float)
if x.ndim == 2:
return np.array(
[concordance(x[k, :], y[k, :]) for k in range(x.shape[0])], dtype=float
)
assert False
def concordance(x: np.ndarray, y: np.ndarray) -> float:
xm = x.mean()
ym = y.mean()
covariance = ((x - xm) * (y - ym)).mean()
ccc = (2 * covariance) / (x.var() + y.var() + (xm - ym) ** 2)
return ccc
# Elements are (tp, tn, fp, fn)
data = (
np.array([[4, 0, 0, 2], [2, 1, 2, 1], [0, 6, 0, 0], [0, 6, 3, 0], [0, 8, 1, 0]]),
)
ret = bootstrap(
data,
statistic_ccc,
confidence_level=0.9,
random_state=42,
vectorized=True,
)
print(ret.confidence_interval)
With scipy-1.9.3, the output was
ConfidenceInterval(low=array([0.]), high=array([0.87804878]))
or, without the insertion of another dimension in statistic_ccc,
ConfidenceInterval(low=0.0, high=0.878048780487805)
With scipy-1.10.0, the output becomes
ConfidenceInterval(low=array([nan]), high=array([nan]))
or, without the insertion of another dimension in statistic_ccc,
ConfidenceInterval(low=0.23076923076923073, high=0.8962655601659751)
which still differs from the behavior in scipy-1.9.3, but produces finite values.
This is caused by the changes in #16455, more specifically the use of len in n_j = [len(theta_hat_i) for theta_hat_i in theta_hat_ji].
Due to the batching and concatenation in _bca_interval, statistic cannot insert a dimension at the end. Using theta_hat_i.shape[-1] instead of len(theta_hat_i) would solve the issue for my use case, but I'm not sure whether I'm missing anything. What do you think?
All computations in _bca_interval are done along axis=-1, so .shape[-1] looks more reasonable than .shape[0] / len(). If you agree, I can prepare a PR with that one-line fix, and add a parametrization to the existing test cases.
Also, shouldn't this be labelled bug or regression instead of query?
I broke it, so I'll fix it today.
@slanmich it looks to me like you really have a four-sample statistic. When each of tp, tn, fp, fn are 1D arrays (e.g. of length 5, like you have in data right now), statistic_ccc returns one number, right? I'm surprised you didn't get an error before. This is how bootstrap saw your data before:
data was a tuple with one sample, a 5 x 4 array,
axis=0 by default
bootstrap expects that statistic is a reduction statistic that acts along axis=0, so bootstrap would expect statistic_ccc(data, axis=0) to return an array with four elements.
If you want tp, tn, fp, fn to get reduced to a single number, this needs to be treated as a multi-sample statistic; data should have four elements. You can do this by taking data out of the tuple and transposing it, and statistic_ccc should accept tp, tn, fp, fn as separate arguments.
There is still a bug, but I'll continue to investigate.
@slanmich it looks to me like you really have a four-sample statistic. When each of tp, tn, fp, fn are 1D arrays (e.g. of length 5, like you have in data right now), statistic_ccc returns one number, right? I'm surprised you didn't get an error before. This is how bootstrap saw your data before:
data was a tuple with one sample, a 5 x 4 array,
axis=0 by default
bootstrap expects that statistic is a reduction statistic that acts along axis=0, so bootstrap would expect statistic_ccc(data, axis=0) to return an array with four elements.
If you want tp, tn, fp, fn to get reduced to a single number, this needs to be treated as a multi-sample statistic; data should have four elements. You can do this by taking data out of the tuple and transposing it, and statistic_ccc should accept tp, tn, fp, fn as separate arguments.
There is still a bug, but I'll continue to investigate.
Right, I think I also tried passing data[0].transpose(), but that causes the following ValueError, which is probably what you are referring to.
actual = tp + fp
ValueError: operands could not be broadcast together with shapes (5,4) (5,5)
That error goes away if I call bootstrap with paired=True, which I thought would apply to my case. The handling of the paired parameter in _bootstrap_iv modifies data to contain a single item, which is probably why I stayed with the original single-item data in the example.
Ah, yes, ok. I'll submit a PR soon. Also, I mentioned above that I left the ability for bootstrap to handle multi-output statistics undocumented. Other than this issue, has it been working well enough that support for multi-output statistics should be a documented feature? Any other features you'd like to see in 1.11.0?
|
GITHUB_ARCHIVE
|
Visual Studio Error D8036
Check those folders from the gameinfo.txt file, the hl2 folder is for media, the intro video, I think. Share. This documentation is archived and is not being maintained. Some time later, after trying a few things, I got to see the message and it said that I should use the release versions. navigate here
When I tried to build that project later on I got the following error message: cl : Command line error D8036: '/Fo.\obj\ms100_r' not allowed with multiple source files I don't see more stack exchange communities company blog Stack Exchange Inbox Reputation and Badges sign up log in tour help Tour Start here for a quick overview of the site Help Center Detailed Is it a coincidence that the first 4 bytes of a PGP/GPG file are ellipsis, smile, female sign and a heart? VCE2008 didn't want to work without the platform and I didn't test it with it. http://stackoverflow.com/questions/12116430/command-line-error-d8036-not-allowed-with-multiple-source-files
The correct value should be:
To play in multi-player mode using a server on another computer, click on "Find Servers" and look for a game to join. Also, will you be able to send us the .vcxproj files and properties sheets?Li ShaoVisual C++ Li Shao Wednesday, December 03, 2008 4:38 AM 0 Sign in to vote Hello Li and Apparently the echoed value is empty (unlike you supposed) - output is "Description: Echoing project GUID ()." for the Message and "ECHO is on." for the Command (which suggests again empty The time now is 03:54 AM.
I however did try converting again with the original format project with quotes removed (i.e. Tweeter Perdu(e) ? Select it and click the Launch button. I have the classic setup - directory for solution and projects in subdirs of the same name as the project.The naming of some output files is inconsistent with VC9.
I just want to make sure that its supposed to come with the copy that the SDK makes. 04-24-2008, 08:24 AM #9 Marine Join Date: Sep 2006 Reputation: The file of the project is a little , there it is necessary to specify the server and a folder under IIS, I their hands registered...At you way Debug\XWebServer is registered Start Steam and click on the My games tab. And several VC9 default values (i.e.
Page 1 of 3 1 2 3 > Thread Tools Display Modes 04-22-2008, 09:01 PM #1 Evil_Ice Guest Posts: n/a Trouble compiling the SDK source code... https://www.garagegames.com/community/forums/viewthread/68536 All rights reserved. 1>BSCMAKE: error BK1506 : cannot open file '.\optimized\audio.sbr': No such file or directory 1>Build log was saved at "file://C:\Torque\TorqueAdvanced\vc8\Optimized\BuildLog.htm" 1>Torque Game Engine Advanced - 2 error(s), 0 warning(s) Why don't you try using TF2 instead of HL2DM? I've made plans to start on a small source mod project to take advantage of the new orangebox source, and create an artistic toon-shaded FPSZ genre fast paced combat mod, and
I'll do it as soon as I can! check over here Running with detailed verbosity, I concluded that all these problems are because I am building project directly without a solution, so $(SolutionName) and $(SolutionDir) that are referenced in property sheets (for Set the Steam Root to be the full pathname of the Steam root directory. (It is probably something like c:/Program Files/Steam.) At this point, the QuickLaunch window should expand to show Any pointers as to where should I look to solve this D8036 problem?Also while I am no DOS expert, it seems to me that some values might be overescaped with backslashes.
This will allow your program to link against the DirectX SDK. Download this zip file. If you still have problem, could you send us the MSBuild log with /v:diag enabled? If you also have the VCBuild log building the same solution, that would be very helpful. http://ndsman.net/visual-studio/visual-studio-next-error-key.php You will see these two errors: Command line error D8036 : '/FoDebug HL2MP/hud_deathnotice1.obj' not allowed with multiple source files cl error BK1506 : cannot open file '.\Debug HL2MP\bitbuf.sbr': No such file
Command-Line Warning D9024. Exit from Steam. Also the include directories and some of the defines are from one of the indirectly inherited property sheets, so it again seems these work alright.Is this something that could result from
All trademarks are property of their respective owners in the US and other countries.
While at it, fix the AssemblerListingLocation default value to have a trailing slash after the configuration name. For me the solution was to change Configuration Properties / C/C++ / Output Files | XML Documentation File Name from "$(TargetPath).xml" to empty string. by Divinity_One on Mon May 12, 2008 11:39 pm . … I'm attempting to compile release HL2MP. Thanks for all your help, Boris Saturday, May 16, 2009 8:34 AM Microsoft is conducting an online survey to understand your opinion of the Msdn Web site.
Exceptional wizard, or with VC6, or created also hands ?And how you compile? Still in Visual Studio, right click on the client project and display its properties. Do the same for Source SDK Base. weblink I have a rule in one property sheet for DLLs that sets DLL names to $(SolutionName)$(ProjectName).dll.
Nevertheless, I get error MSB3107 on compiling Logic:Target "SplitProjectReferencesByType" in file "C:\Windows\Microsoft.NET\Framework\v4.0.11001\Microsoft.Common.Targets" from project "Z:\platform-test\Punctus\Logic\Logic.vcxproj":Using "AssignProjectConfiguration" task from assembly "Microsoft.Build.Tasks.v3.5, Version=18.104.22.168, Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a".Task "AssignProjectConfiguration"C:\Windows\Microsoft.NET\Framework\v4.0.11001\Microsoft.Common.Targets(1046,9): error MSB3107: The specified project reference metadata for Adding \ at the end of OutDir and IntDir made the D8036 problem vanish. Again click the blank line, click the dots, and browse to the Lib/x86 directory (if you are running a 32-bit OS) or to the Lib/x64 directory (if you are running a all it is necessary that?
My /FO option looks like this /Fo"../engine/out.vc8.win32.optimized/" and doesn't use an absolute path like yours. You'll come back to it later. There are a few bugs in our conversion based on the snippets you pasted in your post: - $(ConfigurationName) should automatically be converted to $(Configuration) throughout all the property sheets (looks like Is a molotov cocktail a grenade?
In both modes, there is a radar that appears as part of the heads-up display on the screen. The quoting did prevent the include paths from being found, removing the quotes around BoostIncludeDir and IconvIncludeDir in the converted format fixed the problemAdditionalDependencies=""$(IconvLibDebug)"" in original format did not get into In any event, once you have started a game, you will see a scene that is blocked by a black text area with an OK button below. Adding a piece-wise function and its shifted version by list manipulations Can a PET 2001 be physically damaged from BASIC?
Close the dialog and exit from Visual Studio. You need to add -allowdebug -debug to the command line to launch a debug version. We have tried using VS'08 and VS'05, for both engines, to no avail.
|
OPCFW_CODE
|
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
namespace TestGravityField
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
    // Canvas centre (in canvas coordinates) and canvas size, captured on load.
    double xc;
    double yc;
    double w;
    double h;
    // Vertical scaling factor applied when mapping world y to canvas y.
    double ykoeff = 1;
    // Uniform world-to-canvas zoom factor.
    double koeffScale = 1;

    // Scale a world-space length up to canvas space.
    double IncKoeff(double val)
    {
        return val * koeffScale;
    }

    // Scale a canvas-space length back down to world space.
    double DecKoeff(double val)
    {
        return val / koeffScale;
    }

    // Convert a world x coordinate to a canvas x coordinate.
    double xToCanv(double x)
    {
        var rr = (IncKoeff(x) + xc);
        return rr;
    }

    // Convert a world y coordinate to a canvas y coordinate
    // (canvas y grows downwards, hence the subtraction from yc).
    double yToCanv(double y)
    {
        var rr = ((yc) - IncKoeff(y) / ykoeff);
        return rr;
    }

    // Inverse of xToCanv.
    double xFromCanv(double x)
    {
        return DecKoeff(x - xc);
    }

    // Inverse of yToCanv.
    double yFromCanv(double y)
    {
        return DecKoeff(yc - y) * ykoeff;
    }

    // Visual marker for the central force field disk.
    public Ellipse center = new Ellipse();

    public MainWindow()
    {
        InitializeComponent();
    }

    Random rnd = new Random();

    // Minimal 2D vector with only the operations the simulation needs.
    // Value type: Minus/Mult return modified copies, leaving `this` untouched.
    struct Vect
    {
        public double x;
        public double y;

        // Component-wise negation; returns a new value.
        public Vect Minus()
        {
            Vect vv = this;
            vv.x = -vv.x;
            vv.y = -vv.y;
            return vv;
        }

        // Scalar multiplication; returns a new value.
        public Vect Mult(double v)
        {
            Vect vv = this;
            vv.x = vv.x * v;
            vv.y = vv.y * v;
            return vv;
        }

        // Euclidean length.
        public double Len()
        {
            return Math.Sqrt(x * x + y * y);
        }
    }

    // A point mass: velocity, position, and the WPF shape drawn for it.
    class Particle
    {
        public Vect v; // current velocity
        public Vect r; // current position (radius vector from the centre)
        public Ellipse point = new Ellipse();
    }

    Particle p1 = new Particle();
    Particle p2 = new Particle();

    // Background simulation loop; runs until Window_Closing sets fexit.
    void Cycle()
    {
        while(!fexit)
        {
            Tick();
            Thread.Sleep(1);
        }
    }

    // Place an ellipse so its centre sits at the given world coordinates,
    // and refresh the velocity read-outs. Must run on the UI thread.
    void ToCanvas(Ellipse center, double x, double y)
    {
        Canvas.SetTop(center, yToCanv(y) - center.Height / 2);
        Canvas.SetLeft(center, xToCanv(x) - center.Width / 2);
        v1.Text = $"v1={p1.v.Len()}";
        v2.Text = $"v2={p2.v.Len()}";
    }

    // Radius of the central "charge cloud" inside which the force law is clamped.
    double R_field = 50;

    // One explicit-Euler integration step for a single particle.
    void ApplyForce(Particle p)
    {
        double timekoeff = 100; // time-step divisor: larger => smaller, smoother steps
        var v = p.v;
        var r = p.r;
        double R = r.Len();
        // The force points towards the origin: proportional to the radius vector, negated.
        var f = r.Minus();
        var actualR = R;
        if(R < R_field)
        {
            // Inside the cloud the effective radius is clamped to a constant,
            // mimicking a positive nucleus moving inside an electron cloud.
            actualR = R_field;
        }
        f = f.Mult( 5000000 / (actualR * actualR * actualR));
        var vnew = v;
        vnew.x = v.x + f.x / timekoeff;
        vnew.y = v.y + f.y / timekoeff;
        p.v = vnew;
        p.r.x += vnew.x / timekoeff;
        p.r.y += vnew.y / timekoeff;
    }

    // Advance both particles one step, then redraw once on the UI thread.
    public void Tick()
    {
        ApplyForce(p1);
        ApplyForce(p2);
        // Single dispatcher round-trip per tick. The previous code invoked the
        // dispatcher inside ApplyForce, i.e. twice per tick, and each call redrew
        // both particles anyway — pure redundant UI marshaling.
        Dispatcher.Invoke(() =>
        {
            PointToCanvas();
        });
    }

    // Push both particles' current positions to the canvas.
    void PointToCanvas()
    {
        ToCanvas(p1.point, p1.r.x, p1.r.y);
        ToCanvas(p2.point, p2.r.x, p2.r.y);
    }

    private void Window_Loaded(object sender, RoutedEventArgs e)
    {
        // Capture canvas geometry and derive the world origin (canvas centre).
        w = canvas.ActualWidth;
        h = canvas.ActualHeight;
        xc = w / 2.0;
        yc = h / 2.0;
        // Small blue dot marking the exact centre point.
        Ellipse pcenter = new Ellipse();
        pcenter.Width = 5;
        pcenter.Height = 5;
        pcenter.Fill = Brushes.Blue;
        pcenter.Stroke = Brushes.Black;
        // Red disk visualising the force-field region of radius R_field.
        center.Width = R_field*2;
        center.Height = R_field*2;
        p1.point.Width = 10;
        p1.point.Height = 10;
        p2.point.Width = 10;
        p2.point.Height = 10;
        center.Fill = Brushes.Red;
        center.Stroke = Brushes.Black;
        p1.point.Fill = Brushes.Yellow;
        p1.point.Stroke = Brushes.Black;
        p2.point.Fill = Brushes.Azure;
        p2.point.Stroke = Brushes.Black;
        canvas.Children.Add(center);
        canvas.Children.Add(pcenter);
        // Keep the field markers behind the particles.
        Canvas.SetZIndex(center, -5);
        Canvas.SetZIndex(pcenter, -4);
        canvas.Children.Add(p1.point);
        canvas.Children.Add(p2.point);
        // Initial conditions for the two particles.
        p1.v.x = 300;
        p1.v.y = -30;
        p1.r.x = 0;
        p1.r.y = 75;
        p2.v.x = 100;
        p2.v.y = 0;
        p2.r.x = 0;
        p2.r.y = 30;
        ToCanvas(center, 0, 0);
        ToCanvas(pcenter, 0, 0);
        PointToCanvas();
        // Run the simulation loop on a worker thread.
        tsk = Task.Factory.StartNew(Cycle);
    }

    Task tsk;
    // volatile: written by the UI thread in Window_Closing and polled by the
    // background Cycle loop. Without volatile the JIT may hoist the read out of
    // the loop, so the loop could spin forever and the window would never close.
    volatile bool fexit;

    private async void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
    {
        fexit = true;
        await tsk; // wait for the simulation loop to observe the flag and finish
    }
}
}
|
STACK_EDU
|
On Wednesday, March 25, 2020 7:09:04 AM CET tu...@posteo.de wrote: > Hi, > > the hardware consists of: > AMD Ryzen 5 3600 > MSI Tomahawk MAX > 32GB RAM (Corsair Venegeance 2x16GB,dual channel) > NVidia RTX 2060 SUPER > > cat /proc/mtrr gives me > > reg00: base=0x000000000 ( 0MB), size= 2048MB, count=1: write-back > reg01: base=0x080000000 ( 2048MB), size= 1024MB, count=1: write-back > reg02: base=0x0c0000000 ( 3072MB), size= 512MB, count=1: write-back > reg03: base=0x0dc0a0000 ( 3520MB), size= 64KB, count=1: uncachable > > -> there is a chunk of uncachable memory > > I activated the sanitizer in the kernel and in kernel I found > this: > > > [ 0.000000] MTRR default type: uncachable > [ 0.000000] MTRR fixed ranges enabled: > [ 0.000000] 00000-9FFFF write-back > [ 0.000000] A0000-BFFFF write-through > [ 0.000000] C0000-FFFFF write-protect > [ 0.000000] MTRR variable ranges enabled: > [ 0.000000] 0 base 000000000000 mask FFFF80000000 write-back > [ 0.000000] 1 base 000080000000 mask FFFFC0000000 write-back > [ 0.000000] 2 base 0000C0000000 mask FFFFE0000000 write-back > [ 0.000000] 3 base 0000DC0A0000 mask FFFFFFFF0000 uncachable > [ 0.000000] 4 disabled > [ 0.000000] 5 disabled > [ 0.000000] 6 disabled > [ 0.000000] 7 disabled > ... > [ 0.000000] gran_size: 64K chunk_size: 64M num_reg: 4 lose cover > RAM: 0G > > So I set the kernel cmdline to: > BOOT_IMAGE=/vmlinuz-5051101-64-RT root=/dev/sda11 ro console=tty0 > console=ttyS0,115200n8 enable_mtrr_cleanup mtrr_spare_reg_nr=1 > mtrr_gran_size=64K mtrr_chunk_size=64M > > rebooted and found the same problem unchanged. > > The Gentoo docs says, that there is a BIOS setting - probably under > "CPU" - which I should set from continuos to separated...but nowaday > BIOSse may look like a part of a Science Fiction moview...but I can > find those setting neither under "EZ" (for "easy settings" not under > "advanced" (for "danger ahead - you may screw up your board"). 
> > The bios of the board is not the top newest...it is the previous > version. > The changelog of the newest BIOS does not mention anything I would > see as related to the mtrr problem... > > Background: The RTX 2060 SUPER was intended to replace the > old GTX 960 I had....but the "BMW27" benchmark with Blender > is slower than with the GTX 960... > > How can I fix the mtrr problem (and the performance problem > of the graphics card, is related) ? > > Thanks a lot in advance for any help! > Cheers! > Meino
According to the following, MTRR is being phased out in favor of PAT: https://www.kernel.org/doc/html/latest/x86/mtrr.html Something that looks more promising: https://forums.funtoo.org/topic/1865-properly-configuring-mtrr/ Sorry I can't help more, never looked into this myself, but will probably when I do manage to find the time. If you figure it out, please let me know.
|
OPCFW_CODE
|
using UnityEngine;
/// <summary>
/// Description: A string extension powered by MemoryC.
/// Functions: extends a string so its text can be shown as a toast,
/// converted to a java.lang.String, or spoken aloud.
/// Copyright: MemoryC
/// Time: 2017.02.15
/// </summary>
namespace HuiHut.IFlyVoice
{
public static class MemoryCString
{
#if UNITY_ANDROID
    /// <summary>
    /// Show String as Toast.
    /// Marshals the call onto the Android UI thread via runOnUiThread,
    /// since a Toast must be shown from the UI thread.
    /// </summary>
    /// <param name="text">Text.</param>
    /// <param name="activity">Activity. When null, the current Unity player activity is used.</param>
    public static void showAsToast(this string text, AndroidJavaObject activity = null)
    {
        Debug.Log(text);
        // Fall back to the Unity player's current activity when none is supplied.
        if (activity == null)
        {
            AndroidJavaClass UnityPlayer = new AndroidJavaClass("com.unity3d.player.UnityPlayer");
            activity = UnityPlayer.GetStatic<AndroidJavaObject>("currentActivity");
        }
        AndroidJavaClass Toast = new AndroidJavaClass("android.widget.Toast");
        AndroidJavaObject context = activity.Call<AndroidJavaObject>("getApplicationContext");
        activity.Call("runOnUiThread", new AndroidJavaRunnable(() => {
            // Toast.makeText expects a java.lang.CharSequence, so wrap the C# string
            // in a java.lang.String before crossing the JNI bridge.
            AndroidJavaObject javaString = new AndroidJavaObject("java.lang.String", text);
            Toast.CallStatic<AndroidJavaObject>("makeText", context, javaString, Toast.GetStatic<int>("LENGTH_SHORT")).Call("show");
        }
        ));
    }

    /// <summary>
    /// Wrap a C# string in a java.lang.String object for passing across the JNI bridge.
    /// </summary>
    /// <param name="CSharpString">The managed string to convert.</param>
    /// <returns>An AndroidJavaObject holding the equivalent java.lang.String.</returns>
    public static AndroidJavaObject toJavaString(this string CSharpString)
    {
        return new AndroidJavaObject("java.lang.String", CSharpString);
    }
#endif
    /// <summary>
    /// Speak the specified text and voicer.
    /// Delegates to IFlyVoice.startSpeaking (text-to-speech engine).
    /// </summary>
    /// <param name="text">Text.</param>
    /// <param name="voicer">Voicer. Defaults to "xiaoyan".</param>
    public static void speak(this string text, string voicer = "xiaoyan")
    {
        IFlyVoice.startSpeaking(text, voicer);
    }
}
}
|
STACK_EDU
|
"Unknown column 'Favorite Gun' in 'field list'"
New Fresh install, new database. I am not sure why this is happening
C:\SquadStatJS\node_modules\mysql\lib\protocol\Parser.js:437
throw err; // Rethrow non-MySQL errors
^
Error: ER_BAD_FIELD_ERROR: Unknown column 'Favorite Gun' in 'field list'
at Query.Sequence._packetToError (C:\SQc\SquadStatJS\node_modules\mysql\lib\protocol\sequences\Sequence.js:47:14)
at Query.ErrorPacket (C:\SQc\SquadStatJS\node_modules\mysql\lib\protocol\sequences\Query.js:79:18)
at Protocol._parsePacket (C:\SQc\SquadStatJS\node_modules\mysql\lib\protocol\Protocol.js:291:23)
at Parser._parsePacket (C:\SQc\SquadStatJS\node_modules\mysql\lib\protocol\Parser.js:433:10)
at Parser.write (C:\SQc\SquadStatJS\node_modules\mysql\lib\protocol\Parser.js:43:10)
at Protocol.write (C:\SQc\SquadStatJS\node_modules\mysql\lib\protocol\Protocol.js:38:16)
at Socket.<anonymous> (C:\SQc\SquadStatJS\node_modules\mysql\lib\Connection.js:88:28)
at Socket.<anonymous> (C:\SQc\SquadStatJS\node_modules\mysql\lib\Connection.js:526:10)
at Socket.emit (events.js:315:20)
at addChunk (internal/streams/readable.js:309:12)
--------------------
at Protocol._enqueue (C:\SquadStatJS\node_modules\mysql\lib\protocol\Protocol.js:144:48)
at Connection.query (C:\SquadStatJS\node_modules\mysql\lib\Connection.js:198:25)
at Handshake.<anonymous> (C:\SquadStatJS\comms\search.js:64:21)
at Handshake.<anonymous> (C:\SquadStatJS\node_modules\mysql\lib\Connection.js:526:10)
at Handshake._callback (C:\SquadStatJS\node_modules\mysql\lib\Connection.js:488:16)
at Handshake.Sequence.end (C:\SquadStatJS\node_modules\mysql\lib\protocol\sequences\Sequence.js:83:24)
at Handshake.Sequence.OkPacket (C:\SquadStatJS\node_modules\mysql\lib\protocol\sequences\Sequence.js:92:8)
at Protocol._parsePacket (C:\SquadStatJS\node_modules\mysql\lib\protocol\Protocol.js:291:23)
at Parser._parsePacket (C:\SquadStatJS\node_modules\mysql\lib\protocol\Parser.js:433:10)
at Parser.write (C:\SquadStatJS\node_modules\mysql\lib\protocol\Parser.js:43:10) {
code: 'ER_BAD_FIELD_ERROR',
errno: 1054,
sqlMessage: "Unknown column 'Favorite Gun' in 'field list'",
sqlState: '42S22',
index: 0,
sql: "SELECT m.attacker AS 'Steam_ID', `Name`, `Wounds`,`Kills`,`Deaths`,`Kills`/`Deaths` AS `K/D`,`Revives`,m.id AS 'ID', `TeamKills`, `Favorite Gun`, `Favorite Role` FROM `PlayerWounded` m LEFT JOIN ( SELECT attacker, COUNT(*) AS `Wounds` FROM `PlayerWounded` WHERE server IN (1) GROUP BY attacker ORDER BY time ASC) w ON w.attacker = m.attacker JOIN (SELECT attacker, weapon AS `Favorite Role`, COUNT(weapon) FROM PlayerWounded WHERE attacker =<PHONE_NUMBER>4256439 GROUP BY weapon ORDER BY COUNT(weapon) DESC) fw ON fw.attacker = m.attacker LEFT JOIN (SELECT attacker, COUNT(*) AS `TeamKills` FROM `PlayerDied` WHERE server = 1 AND teamkill = 1 GROUP BY attacker) tk ON tk.attacker = m.attacker JOIN (SELECT attacker, weapon AS `Favorite Role`, COUNT(weapon) FROM PlayerDied WHERE attacker =<PHONE_NUMBER>4256439 GROUP BY weapon ORDER BY COUNT(weapon) DESC) fg ON fg.attacker = m.attacker LEFT JOIN (SELECT attacker, COUNT(*) AS `Kills` FROM `PlayerDied` WHERE server IN (1) GROUP BY attacker) k ON k.attacker = m.attacker LEFT JOIN ( SELECT victim, COUNT(*) AS `Deaths` FROM `PlayerDied` WHERE server IN (1) GROUP BY victim) d ON d.victim = m.attacker LEFT JOIN (SELECT steamID, lastName AS `Name` FROM `SteamUser`) s ON s.steamID = m.attacker LEFT JOIN ( SELECT reviver, COUNT(*) AS `Revives` FROM `PlayerRevived` WHERE server IN (1) GROUP BY reviver ) r ON r.reviver = m.attacker WHERE steamID = '76561198024256439' AND server IN (1) GROUP BY m.attacker HAVING `K/D` IS NOT NULL ORDER BY `K/D` DESC, time DESC"
}
Fixed the issue... It was my fault: when fixing the roles and guns I forgot to change the Role to Gun on one of the copied queries
Use this fix: https://github.com/11TStudio/SquadStatJS/commit/94187202684015515f3c2e96a78d9a9fcee70e98 right in your code or just reclone the project (or git fetch it...)
Fixed the issue... It was my fault: when fixing the roles and guns I forgot to change the Role to Gun on one of the copied queries
Use this fix: https://github.com/11TStudio/SquadStatJS/commit/94187202684015515f3c2e96a78d9a9fcee70e98 right in your code or just reclone the project (or git fetch it...)
|
GITHUB_ARCHIVE
|
Generally, for two suitably well-behaved schemes $X$ and $Y$ (e.g. affine, smooth, complex) and with $D(X)$, $D(Y)$ their derived categories of quasicoherent sheaves, then a Fourier-Mukai transform with integral kernel $\mathcal{E} \in D(X \times Y)$ is a functor (of triangulated categories/stable (infinity,1)-categories) $\Phi_{\mathcal{E}} \colon D(X) \to D(Y)$
which is given as the composite of the (derived) operations of
pullback (inverse image) along one projection out of the product;
tensor product with $\mathcal{E}$;
push (direct image) along the other projection;
i.e. a pull-tensor-push transform through the correspondence space $X \times Y$ with twist $\mathcal{E}$.
As discussed at integral transforms on sheaves this kind of integral transform is a categorification of an integral transform/matrix multiplication of functions induced by an integral kernel, the role of which here is played by $\mathcal{E}$.
Indeed, the central kind of result of the theory (theorem 1) says that every suitable linear functor arises as a Fourier-Mukai transform for some , a statement which is the categorification of the standard fact from linear algebra that every linear function between finite dimensional vector spaces is represented by a matrix.
If is a moduli space of line bundles over a suitable algebraic curve, then a slight variant of the Fourier-Mukai transform is the geometric Langlands correspondence in the abelian case (Frenkel 05, section 4.4, 4.5).
Let $X$ and $Y$ be schemes over a field $k$. Let $K \in D(X \times Y)$ be an object in the derived category of quasi-coherent sheaves over their product. (This is a correspondence between $X$ and $Y$ equipped with a chain complex $K$ of quasi-coherent sheaves).
The functor $\Phi_K \colon D(X) \to D(Y)$ defined by
$$\Phi_K(\mathcal{F}) := R p_{Y*}\left(p_X^* \mathcal{F} \otimes^{L} K\right),$$
where $p_X$ and $p_Y$ are the projections from $X \times Y$ onto $X$ and $Y$, respectively, is called the Fourier-Mukai transform of $K$, or the Fourier-Mukai functor induced by $K$.
When a functor $F \colon D(X) \to D(Y)$ is isomorphic to $\Phi_K$ for some $K \in D(X \times Y)$, one also says that $F$ is represented by $K$ or simply that $F$ is of Fourier-Mukai type.
The key fact is as follows
See Orlov 2003, 3.2.1 for a proof.
Though theorem 1 is stated there for admitting a right adjoint, it follows from Bondal-van den Bergh 2002 that every triangulated fully faithful functor admits a right adjoint automatically (see e.g. Huybrechts 08, p. 6).
On the level of the DG enhancements, it is true for all smooth proper -schemes that, in the homotopy category of DG categories, every functor corresponds bijectively to an isomorphism class of objects on . See (Toen 2006).
Shigeru Mukai, Duality between $D(X)$ and $D(\hat{X})$ with its application to Picard sheaves. Nagoya Mathematical Journal 81: 153-175. (1981)
Lutz Hille, Michel van den Bergh, Fourier-Mukai transforms (arXiv:0402043)
Banerjee and Hudson have defined Fourier-Mukai functors analogously on algebraic cobordism.
Bertrand Toën, The homotopy theory of dg-categories and derived Morita theory, Invent. Math. 167 (2007), 615–667
Alberto Canonaco, Paolo Stellari, Internal Homs via extensions of dg functors (arXiv:1312.5619)
Discussion in the context of geometric Langlands duality is in
For a discussion of Fourier-Mukai transforms in the setting of -enhancements, see
|
OPCFW_CODE
|
A plugin that makes tiddlywiki a multi-user wiki on node
BIG DISCLAMER OF DOOM - Back up your data. Do it. This has been tested but there may be bugs that I don't know about. Also see notes below.
A lot of the documentation is in the tiddler files in the Documentation folder of the plugin, or in the wiki in the plugin information on the control panel.
To make this more accessible to people I made it so you can download a single file and then run it and everything should work. When you run it it should even open the wiki in your default browser.
To do use this go here (https://github.com/OokTech/TW5-BobEXE) and download the file for your system (BobLinux for linux, BobWin.exe for windows and BobOSX for mac). Then run the file.
It will create an index wiki in the same folder where you run the file, so if you want you can copy the file somewhere else. If you want to move it after you have run it the first time just be sure to copy the
IndexWikifolder to the same location or it will create a new one without any changes you have made.
If you are familiar with using TiddlyWiki on node then you just need to put the plugin into your plugins folder and include it in your
`tiddlywiki.info` file. For the moment this plugin must be located in the
`OokTech/Bob` subfolder of your plugins folder and listed as
`OokTech/Bob` in your `tiddlywiki.info` file. You start the server using the
`wsserver` command instead of the normal `server` command.
Also see Configuration.md.
If you want to use a fresh local install of tiddlywiki here are command line instructions:
Clone the tiddlywiki repo and get the plugin (Only do this the first time to install everything):
git clone --depth=1 --branch v5.1.22 https://github.com/Jermolene/TiddlyWiki5.git git clone --depth=1 https://github.com/OokTech/TW5-Bob.git TiddlyWiki5/plugins/OokTech/Bob mkdir TiddlyWiki5/Wikis cp -r TiddlyWiki5/plugins/OokTech/Bob/MultiUserWiki TiddlyWiki5/Wikis/BobWiki/
After that is finished, and to start up tiddlywiki later type:
cd TiddlyWiki5 node ./tiddlywiki.js Wikis/BobWiki --wsserver
In a browser go to
`127.0.0.1:8080` and the wiki should load. From here any tiddlers you create should have .tid files created in the
`Wikis/BobWiki/tiddlers` folder, and any edits you do to those files should be immediately reflected in the browser. Open the tiddler called
$:/ServerIP, if you go to the ip address listed there on port
8080(on mine right now the tiddler says
192.168.0.15, so I put
192.168.0.15:8080in the browser of another computer on the same network to access the wiki). Now any changes you make to tiddlers on one computer will be reflected almost immediately on the other, and any changes you make to tiddlers or the file system will be almost immediately reflected in all connected wikis.
If you want to use the global tiddlywiki install you have to set the environment variable
TIDDLYWIKI_EDITION_PATHto the folder where you have your plugins. On OSX or Linux you open a terminal and type these commands:
export TIDDLYWIKI_PLUGIN_PATH="/path/to/your/plugins" export TIDDLYWIKI_EDITION_PATH="/path/to/your/editions" tiddlywiki editions/BobWiki --wsserver
If you want to change settings see Configuration.md for information.
When a new version of Bob is released you can update your plugin like this. If you followed the instructions above exactly then you use this. If you cloned the repo elsewhere then you need to cd into the folder where you cloned the plugin.
You can do this to make sure you have the most recent version, running this command when you already have the newest version does nothing and won't break anything so you can try it if you are not sure without worrying.
In a terminal type these commands:
cd TiddlyWiki5 cd plugins/OokTech/Bob git pull
This is to update your version of tiddlywiki, not Bob.
When TiddlyWiki releases a new version you need to update your TiddlyWiki version as well. This assumes that you followed the above instructions exactly. If you cloned the TiddlyWiki repo somewhere else then you have to cd into that folder instead.
In a terminal type these commands:
cd TiddlyWiki5 git fetch --all --tags --prune git checkout tags/v5.1.22
To use future or previous versions you would change the
5.1.22in the last command to match the version number you want to use.
NOTE 1 - .meta files: there isn't full support for .meta files. The only currently known limitation is that when you rename either the .meta file or the file it describes the changes aren't correctly reflected in the browsers. Renaming in the browser works as expected. Also empty .tid files are created for any tiddler with a
_canonical_urifield in addition to the .meta file. This has no effect on the wiki.
NOTE 2 - command line arguments and configuration: I am terrible with command line arguments. To prevent the need to have 10 or 15 command line arguments in order to fully configure a wiki I instead added a
settings folder in the same folder that holds the
tiddlers folder and the
tiddlywiki.info file. Inside this folder there is a
settings.json file that you can use to configure the wiki. This also lets you change the wiki's settings from within the wiki. Most of the settings wouldn't take effect until the wiki server is reset, so I made a way to reset the wiki server from inside the wiki. You can also shut down the wiki server from inside the wiki.
Here is a more detailed list of things added or changed by this plugin
wsserver that starts up a minimal http and websocket server used for the real-time communication between the browser and server.
externalserver which starts up the wiki without a server so that you can use an external server, like an expressjs server.
|
OPCFW_CODE
|
Welcome to WebmasterWorld Guest from 22.214.171.124
Forum Moderators: mademetop
the advice seems to be that changing things like page names (URLs) and site structure (navigation) is hazardous with regard to search engine results if your pages are already indexed; but what about the long term affect?
We dont rely much on search engine results, but i can see that there are some fundamental changes that could be made to our site that could produce better SE results. The pages already indexed are old, and not 'optimised'. I'd like to optimise them, to the best of my ability, and perhaps lose good SE results in the short-term (tomorrow), but achieve better results down the track (6 months?).
For example, id like to change the navigation structure, aswell as some page names that arent keyword focused. I would in effect be creating some 'dead' links, but i assume these would only be short-term until my site is re-indexed?
Thinking 'long-term', would these changes be better for my site? Or would they effectively kill off the importance placed on my site by SEs for an indeterminable amount of time?
I'd appreciate your thoughts :)
a couple of suggestions: leave all the current pages as they are except add a redirect to the new page you are replacing it with. I do this with meta refresh zero, but others suggest a permanent redirect. By doing this, the previously established content is still there, it's just sending traffic to a page that is hopefully more relevant with the modifications, and which will end up ranked higher.
this can be a mess with a large site and once the new pages are getting traffic you may want to go back and pull the old pages to tidy up the site.
I would do as neuron said, use 301 permanent redirect to let the SE bots that your page has been permanently moved elsewhere.
Google is really quick at picking up 301's, but it's a different story [webmasterworld.com] for Yahoo and co. (Msg #3 from Tim which is the Yahoo! official here on WebmasterWorld).
Depending on the existing extension of your pages you have two solutions:
Hope this helps,
In fact i'm doing just that for a six-year old site right now, and it's not the first time, although the task becomes greater for each time - this time it's affecting almost the whole site which is a PITA but it's also the right thing to do, so i have to do it. It's a natural step in maintaining a site (ie. not just a collection of pages) as sites do develop, and their structure should reflect this.
The only reason that i do this right now is because of the user benefit - my users will notice this immediately, while the SE's will be slow at recognizing the changes. Otherwise, if i had thought about SE's only, i might have hesitated, as:
I have to tell you that right this moment both Google and Yahoo seems to have some problems in handling redirects properly. Also, Google has introduced some kind of a delay in the way it handles new pages and this definitely makes it more unattractive in the short term.
I do these things:
Ten step guide to re-organizing your site
1) first, create the new directories and the content for them.
2) upload it and correct the links on the site so that they point to the new URLs
3) then create "page moved" pages for the old URL's instructing users to change bookmarks and links
4) then setup 301 redirects in your .htaccess file from old -> new page URL
5) then, upload the "page moved" pages
6) observe SERP chaos (mainly wrong URLs) for a month or so, perhaps more
7) note that your previously great PR pages are now white or low for a while
8) remove the 301 redirects after a few months, so that your users will see the pages from (5) and hence get prompted to change their links
9) make sure that the important SE's have shown the right URLs in the SERPS for some time before (8)
10) Don't panic and redo stuff because of (6) and (7) - make a firm decision and stick to it
You could omit steps (3), (5), and (8) but you will not be able to avoid (6) and (7). Step (10) is essential.
Also, if you do (5) before (4) you will risk that some SERPS will show a "page moved" message for a long time (or whatever the title of your moved page-page is). The crawler could be visiting you just as you upload the pages. Right now, i do have #1 rankings for some keywords with a "page moved" page because i accidently did it the other way round for one directory on the site in question. (This will be corrected, however, as i now have the 301's in place. I expect a month or so with the wrong snippet+url in SERPS)
You might find more info in this thread: Site Change of URL [webmasterworld.com] - i've written about a few different (closely related) issues there.
claus i read through that whole thread, and i think it's interesting that a tutorial in the "dynamic" form of a forum/thread is twice as helpful as its static counterparts (some of my questions have already been answered and i haven't even asked them yet)
joined:Apr 29, 2004
[edited by: pageoneresults at 10:52 am (utc) on May 11, 2004]
[edit reason] Removed URI Reference - Please refer to TOS [/edit]
Easiest solution is to change providers NOW! Trust me, the day will come that you don't want to be on a windoze server anymore. Perhaps when the next worm comes around in a few weeks...
Second, even for windoze there is Apache. And Apache allows .htaccess. So I have to conclude that your provider does not even use the most used web server software there is (i.e. Apache with now about 70 per cent market share) but some other software. Again, change providers NOW! Well, even other web servers have the possibility to somehow send raw headers.
And if your ISP at least allows PHP on their servers it will be as easy as replacing your current pages with a script that outputs these two lines:
header("HTTP/1.1 301 Moved Permanently");
And you're done! Oh, if your original pages weren't PHP to begin with you may need to convince your web server to treat whatever file extension they were as if they are PHP scripts.
[edited by: pageoneresults at 10:52 am (utc) on May 11, 2004]
[edit reason] Examplified URI [/edit]
Generally correct. If you have a page stuffed with nice little keywords for a search engine to pick up and after getting to top rankings you introduce a redirect to get your site visitors to another page, that is spamming.
But to let your site visitors (and search engines) know that a previously found here page has moved to over there is not only okay, but good practice. It stops people running into broken links and helps understand search engines that the page still exists, but has "Moved Permanently". No spamming.
I just saw some site that's had top rankings for years do that with a change in file naming one on one from .htm to .shtml. They're using meta-refresh and they've disappeared altogether from Google for all of those pages and the homepage. It's only a matter of time 'til Yahoo catches it as well.
How do I do the url redirects with Windows?
You will need to have your host install a third party ISAPI filter. I recommend ISAPI_Rewrite.
Once your host has installed the global files, they will then drop an httpd.ini file in your local root directory. That file will contain all the instructions for redirecting from old to new. The lines within that file might look like this...
RewriteRule /old-page.asp http://www.example.com/new-page.asp [I,O,RP]
P.S. Your host will need to be familiar with Regular Expressions as they will probably be the ones who need to do the rewrite for you. Or, you can always hire a third party who has experience with ISAPI filters and they can configure an ini for you which can then be uploaded to your root.
I don't know if it's just me that's lucky, but with Google i see these changes (301 redirects) picked up really fast at the moment. And done right too. I have seen the URL being changed in SERPS in less than five days.
Still, it's not a general rule. I think it will take longer time for very deep pages or low PR pages (pages that are not so often spidered).
I just changed a site around the lazy way. I removed the links from the homepage and put them on a page one click away from home.
No redirects or anything wise mentioned in this thread.
I then lost 2500 pages from that site in the google index.
I guess I now believe in myths.
i asked my host about 301 redirects, and they replied saying they "do not support customer error documents via IIS"
what does that mean? i read through that thread... am i missing something? Asking the wrong thing?
Sniffer, you are asking the right thing, your host seems to be missing a clue or two.
Search WW [webmasterworld.com] for "301 redirect IIS" and you will find a lot of information here.
Sniffer & Girish
If your host is unable/unwilling to do this (redirects) for you, you could use an ASP script similar to the one posted in msg 2 of this thread. [webmasterworld.com] Or you could consider moving your site to a more helpful host. HTH
<%@ Language=VBScript %>
Response.Status="301 Moved Permanently"
Response.AddHeader "Location", "http://www.sitename.com/page.asp"
<%@Page Language="C#" Debug="True"%>
private void Page_Load(object sender, System.EventArgs e)
Response.Status = "301 Moved Permanently";
Hope it helps
[edited by: pageoneresults at 12:55 am (utc) on May 13, 2004]
[edit reason] Removed URI Specifics - Please Refer to TOS [/edit]
please clear my doubt.
Mozart is right. Redirecting is not against guidelines, only some special types of redirects are against the guidelines.
You have a good ranking page about apples. You redirect that page to another page that is really about "widget sales", but only if the visitor is not the Googlebot. So, people see an apple page in the SE listings, but the page they see when they click on the link is a widget sales page in stead. That is the type of redirects that the SE's don't like.
You still have a good ranking page about apples. You move that page from the section "food" to a new section about "fruit". It's the same page, it's just moved to another URL. That is not against any guidelines - it is perfectly okay with the SE guidelines to do that.
Your company changes name from "food-co" to "fruit-co". For that reason the website URL and name needs to be changed as well, so you move all your pages to the new domain and redirect from "food-co.com" to "fruit-co.com". That is also perfectly okay with the SE guidelines.
|
OPCFW_CODE
|
MS Access is the most suitable program for you! You can keep your records in an Access database which can be viewed easily using your workstation if you need specific information. If you have no idea about where to start, don’t worry, we have various MS Access examples. In this case, the most suitable for you is Microsoft Access Library management templates database.
Interested in opening a small library as your business? It’s a good way to share knowledge all over the world, at least, in your environment. However, to run a small library, you don’t only need a collection of books in neat bookshelves. You should also create a good management system for your library. There should be specific records for your books, which books are borrowed, and which books are left on your bookshelves. All of this information can be found easily if you provide your library with a good records system. This is useful if you want such a simple-but-complete system to record your entire daily “book transactions”.
You just have to download our MS Access examples, and then do some adjustment based on your needs. This MS Access example will help you to create Access library database for your library’s daily activities. If previously you don’t have such database, don’t worry, this Access database template will automatically create an Access database for you which can be accessed easily from your workstations. Opening the Access database template, you’ll find a Borrow List in main page. How to track those activities? On the tab above the column, there are several buttons which help you tracking those activities. For example you want to find out what are books returned in specific date, you’ll just have to click the “Return Book” button, and then you’ll easily get detail information regarding the books which have returned on certain date. In case the book is late to return, you can add some fine to your customers. Some small fines can help you to build discipline for your customers. You can also find detail information regarding your customer by using this Access library customer membership database. But first of all, you have to register all your customers by using “Member Registration” button. Your records will be displayed in main page. There are some columns to record library’s activities there, such as “Borrow ID”, “Member Name”, “Book Title”, “Borrow Date”, “Return Date”, and “Status”. This form will make sure you can monitor all your library’s daily activities. Those data will be stored in your Access database and you can call the specific data needed using your workstations.
This access database template will help you to spread knowledge to your neighborhood. More than business, you can also develop your communities by sharing your book collection to people in your environment.
Related Access Database
- microsoft access 2016 templates library
- library management system in ms access download free
|
OPCFW_CODE
|
When a user launches an iPhone app it’s really important to give them a consistent experience. They should be able to trust that the app is going to allow them to accomplish the task they have set out to complete. Throwing them into a saved state from three days ago is jarring and tedious to unwind. App developers need to be aware that users will sometimes be in a hectic environment with time constraints. A user correlates the app icon with solving a problem they have, not displaying the solution to the last problem they solved.
In the Olden-Times
Before iOS 4 many apps didn’t have this problem. State saving wasn’t part of the OS, so most apps launched in a blank slate state. Some apps had started using URL based Navigation, as popularized Facebook’s three20 framework, as a way to save state. Apps using this method to preserve state often fall victim to placing their users in a deep location within a navigation tree with no way to escape. Facebook for iPhone attempts to solve this problem by allowing the user to tap the Facebook logo to return to the home screen. However, this isn’t a standard UIKit action, so most users don’t know it exists.
Multitasking Made it Worse
After iOS 4, almost all apps suffer from this problem. If the application is complex enough to have multiple levels of depth in a Navigation Controller, then it will suffer from this problem. The problem goes away once the app is killed in the background, meaning that apps with a larger memory footprint suffer this problem less. For example, your state in Tiny Wings is only saved for a short period of time before it is killed because it uses so much memory. On the other hand, Twitter for iPhone will hang around in memory much longer because it has a comparably smaller memory footprint. However, since Twitter for iPhone uses URL based Navigation, it still suffers from this problem even after being killed in the background.
The solution is Temporal Awareness.
Apps can easily record the last time they were closed or lost focus. If the app then checks this time when it is next launched, it can tell how long it has been since the user last used it. If that time is past some constant, say a few hours, the user state should be cleared and they should be presented with the main screen with empty input fields.
If the app in question requires the user to enter more than a few values, a flight booking app for example, then a history view should be employed if the user wishes to return to a previous query. The app should still launch on the main screen with the user input fields blank.
Games should always save the state once they lose focus. They should still make use of Temporal Awareness to decide how to present the saved state to the user. If it has been less than a few hours, simply putting the user back into the game with a resume button is fine. If it has been longer, then presenting them with the main screen of the game, but with an option to continue from their last session.
How many times have you opened the built-in Maps app only to find that it is on a detail screen from a business you looked up weeks ago? Preserving this level of depth into the app would have been incredibly helpful if this was half an hour after you began planning the trip. Maps saves its state to disk because it was created long before iOS had multitasking. This was a nice stopgap, but it should be updated to be temporally aware.
After a few days of iPhone usage, you’ll end up with many pages in Safari that aren’t relevant. Since Safari is saving these URLs to disk, you will eventually be required to close them all by hand. What’s worse is Safari’s behavior once you close them all by hand and then return to the app later. The bookmarks modal view is displayed, requiring the user to dismiss this view if they just wanted to search Google or open a website that wasn’t bookmarked. Safari should be aware of the last time the user made use of the app, and display a startup view accordingly.
|
OPCFW_CODE
|
This is the Title of the Book, eMatter Edition
Copyright © 2007 O’Reilly & Associates, Inc. All rights reserved.
Chapter 10: Security and Monitoring
telephony apps. That would not be a good situation anywhere: voice is expected to
work 100 percent of the time.
But rather than respond to threats after you’ve already become a victim, you can use
a few techniques to proactively monitor for problems. These techniques are applied
at places where network traffic is concentrated: routers and softPBX servers.
Project 10.3. Logging and Controlling VoIP Packets
What you need for this project:
• A Linux PC capable of running the NetFilter firewall (iptables)
When a Linux NetFilter firewall is used to protect a group of VoIP bastion hosts or
just as a gateway router for a segment where VoIP is used, a lot of VoIP-related
events can be monitored and logged. Logging from the firewall is useful for the secu-
rity-minded, but it’s important for other reasons, too. It lets you get a feel for which
remote networks and hosts are communicating with your VoIP services and how
often they are. This can improve your understanding of bandwidth consumption and
traffic patterns on your network, besides giving you a keener awareness of security.
NetFilter’s default configuration provides for no logging. If you want a particular
type of packet logged, say, from a specific network or on a specific port, you must
tell NetFilter to log it. When a packet is logged, its pertinent information is sent to
syslog to be stored. Syslog is the system-wide logging daemon that is a staple in most
Unix-variant operating systems.
Some sysadmins and VoIP skeptics are concerned that a perpetrator might try to gain
access to a private IP network through the PSTN. Even if it were possible for an
attacker to fatally exploit a bug in the VoIP infrastructure—say, a codec—her only
means of transmitting data into the compromised host would be through the analog or
TDM connection to the PSTN.
Once compromised, it is possible this connection wouldn’t be running any longer,
thus cutting off the attacker’s pathway into the network. The attacker’s available band-
width would be less than 64 kbps, and he would have no means of sending IP traffic,
because his pathway into the system wouldn’t even be TCP/IP-enabled. Even if he
could crash the host, he couldn’t transmit any data to it through the PSTN. So, aside
from a denial of service due to an exploited bug somewhere in the VoIP network, the
threat here is understandably low.
|
OPCFW_CODE
|
/**
* Daily Challenge #31 - Count IPv4 Addresses
* https://dev.to/thepracticaldev/daily-challenge-31-count-ipv4-addresses-487j
*
* Your challenge is to write a function that accepts starting and ending IPv4
* addresses and returns the number of IP addresses from start to end, excluding
* the ending address. All input will be valid IPv4 addresses in the forms of strings.
*
* Examples:
* ipsBetween("10.0.0.0", "10.0.0.50") => 50
* ipsBetween("10.0.0.0", "10.0.1.0") => 256
* ipsBetween("20.0.0.10", "20.0.1.0") => 246
*/
/**
 * Calculate the number of IPs available between two IPv4 addresses
 * (the end address itself is excluded from the count).
 * @param {string} startIpv4 Starting IPv4 address
 * @param {string} endIpv4 End IPv4 address
 */
function ipsBetween(startIpv4: string, endIpv4: string): number {
    const startValue = ipv4ToInt(startIpv4);
    const endValue = ipv4ToInt(endIpv4);
    return endValue - startValue;
}
/**
 * Convert an IPv4 address to an unsigned 32-bit integer.
 * @param {string} ipv4 IPv4 address in dotted-quad notation
 * @returns The numeric value of the address, or NaN when the input is not
 *          a valid IPv4 string (a warning is logged in that case).
 */
function ipv4ToInt(ipv4: string): number {
    // https://ihateregex.io/expr/ip
    // Anchored with ^...$ — the original unanchored pattern accepted invalid
    // input such as "999.0.0.0" because "99.0.0.0" matches as a substring.
    const ipv4Re: RegExp = /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/;
    if (ipv4Re.test(ipv4) === false) {
        console.warn('Invalid IPv4 string supplied');
        // NaN (instead of a bare `return;`) keeps the declared number return
        // type honest; any arithmetic on the result stays NaN either way.
        return NaN;
    }
    // ">>> 0" coerces the accumulator back to unsigned after each shift;
    // plain "<<" produces a negative signed 32-bit value for addresses at or
    // above 128.0.0.0, which would corrupt differences across that boundary.
    return ipv4.split('.').reduce((total, octet) => ((total << 8) | Number(octet)) >>> 0, 0);
}
/**
* Test
*/
/**
 * Test harness — runs each sample case through ipsBetween and logs
 * PASS/FAIL together with the inputs, expected and actual values.
 */
(function Main(): void {
    const cases: [[string, string], number][] = [
        [['10.0.0.0', '10.0.0.50'], 50],
        [['10.0.0.0', '10.0.1.0'], 256],
        [['20.0.0.10', '20.0.1.0'], 246]
    ];
    cases.forEach(([args, expected]) => {
        const result = ipsBetween(...args);
        const status = result === expected ? 'PASS' : 'FAIL';
        console.log(status, { args, expected, result });
    });
})();
|
STACK_EDU
|
import re
import utils
from os.path import join
import logging
# Number of characters captured before/after an annotation for context matching.
_text_window = 150
# Size of the document-head window used by rules with compare_type == 0.
_head_text_window_size = 200
class Rule(object):
    """A named filter rule: a set of compiled regex patterns plus matching options.

    compare_type selects which context string the patterns are applied to
    (positive: text after the annotation; negative: text before; 0: document
    head; 100: the annotated string itself; -100: raw cut-off anchor texts
    kept as plain strings, not compiled).
    """

    def __init__(self, name, compare_type=-1, containing_pattern=False, case_sensitive=False, more_context_sents=None):
        self._name = name
        self._compare_type = compare_type
        self._is_containing = containing_pattern
        self._is_case_sensitive = case_sensitive
        self._reg_ptns = []
        # None sentinel instead of a mutable default argument ([] as a default
        # would be shared between every Rule created without the argument).
        self._more_context_sents = [] if more_context_sents is None else more_context_sents

    @property
    def name(self):
        return self._name

    @property
    def compare_type(self):
        return self._compare_type

    @property
    def more_context_sents(self):
        # offsets of neighbouring sentences (-1 prev / 1 next) to include
        return self._more_context_sents

    @property
    def is_case_sensitive(self):
        return self._is_case_sensitive

    @property
    def is_containing_patterns(self):
        return self._is_containing

    @property
    def reg_patterns(self):
        return self._reg_ptns

    def add_pattern(self, ptn):
        """Compile and store a pattern string according to this rule's options.

        Non-containing patterns are anchored with ^...$ (full-string match);
        containing patterns are wrapped with .* on both sides; compare_type
        == -100 keeps the raw string (used as a plain cut-off anchor text).
        Exits the process on an invalid regex.
        """
        if self.compare_type == -100:
            pass
        elif not self.is_containing_patterns and not ptn.startswith('^') and not ptn.endswith('$'):
            ptn = '^' + ptn + '$'
        elif self.is_containing_patterns:
            ptn = '.*' + ptn + '.*'
        try:
            if self.compare_type == -100:
                reg_p = ptn
            elif self.is_case_sensitive:
                reg_p = re.compile(ptn)
            else:
                reg_p = re.compile(ptn, re.IGNORECASE)
            self._reg_ptns.append(reg_p)
        except Exception:
            logging.error('regs error: [%s]' % ptn)
            exit(1)
class AnnRuleExecutor(object):
    """Applies filter rules to annotations found in free text.

    Rules are matched against a context string chosen by each rule's
    compare_type: the text before or after the annotation, the document
    head, the annotated string itself, or cut-off anchor texts.
    """

    def __init__(self):
        # character window around the annotation used for context matching
        self._text_window = _text_window
        self._filter_rules = []
        self._skip_terms = []
        # original-string filter patterns (plain regex strings)
        self._osf_rules = []

    @property
    def skip_terms(self):
        # terms that are always filtered out, regardless of context
        return self._skip_terms

    @skip_terms.setter
    def skip_terms(self, value):
        self._skip_terms = value

    def add_filter_rule(self, token_offset, reg_strs, case_sensitive=False, rule_name='unnamed',
                        containing_pattern=False, more_context_sents=None):
        """Register a new filter rule built from a list of regex strings.

        :param token_offset: the rule's compare_type (see Rule)
        :param reg_strs: list of regex pattern strings
        :param more_context_sents: optional list of neighbour-sentence offsets
            (None sentinel avoids the shared-mutable-default pitfall)
        """
        rule = Rule(rule_name, compare_type=token_offset,
                    containing_pattern=containing_pattern,
                    case_sensitive=case_sensitive,
                    more_context_sents=[] if more_context_sents is None else more_context_sents)
        for p in reg_strs:
            rule.add_pattern(p)
        self._filter_rules.append(rule)

    @staticmethod
    def relocate_annotation_pos(t, s, e, string_orig):
        """Re-locate [s, e] so it points at string_orig within t.

        Needed because encoding differences can shift annotation offsets.
        Returns the whole-word occurrence of string_orig closest to the
        original start position, or the original [s, e] when none is found.
        """
        if t[s:e] == string_orig:
            return [s, e]
        candidates = []
        ito = re.finditer(r'\b(' + re.escape(string_orig) + r')\b',
                          t, re.IGNORECASE)
        for mo in ito:
            candidates.append({'dis': abs(s - mo.start(1)), 's': mo.start(1), 'e': mo.end(1), 'matched': mo.group(1)})
        if len(candidates) == 0:
            return [s, e]
        # sort by distance from the original start position; key= works on
        # both Python 2 and 3 (the original cmp= argument is Python 2 only)
        candidates.sort(key=lambda c: c['dis'])
        return [candidates[0]['s'], candidates[0]['e']]

    def execute(self, text, ann_start, ann_end, string_orig=None):
        """Run all filter rules against the annotation at [ann_start, ann_end].

        :param string_orig: annotated string, used to re-locate the offsets
            when encodings disagree; defaults to text[ann_start:ann_end]
        :return: (filtered, matched_strings, rule_name)
        """
        # it seems necessary to relocate the original string because of encoding issues
        if string_orig is not None:
            [s, e] = AnnRuleExecutor.relocate_annotation_pos(text, ann_start, ann_end, string_orig)
            ann_start = s
            ann_end = e
        else:
            string_orig = text[ann_start:ann_end]
        s_before = text[max(ann_start - self._text_window, 0):ann_start]
        s_end = text[ann_end:min(len(text), ann_end + self._text_window)]
        filtered = False
        matched = []
        rule_name = ''
        for r in self._filter_rules:
            # pick the context: >0 -> text after, <0 -> text before,
            # 0 -> document head, 100 -> the annotated string itself
            s_compare = s_end if r.compare_type > 0 else s_before
            if r.compare_type == 0:
                s_compare = text[:_head_text_window_size]
            elif r.compare_type == 100:
                s_compare = string_orig
            for reg_p in r.reg_patterns:
                m = reg_p.match(s_compare)
                if m is not None:
                    matched.append(m.group(0))
                    rule_name = r.name
                    filtered = True
                    break
        return filtered, matched, rule_name

    def execute_context_text(self, text, s_before, s_end, string_orig, start, end, more_context_sents=None):
        """Run filter rules with context strings supplied by the caller.

        :param s_before: text preceding the annotation
        :param s_end: text following the annotation
        :param start: annotation start offset (used by cut-off rules)
        :param more_context_sents: optional dict with 'prev'/'next' sentences
        :return: (filtered, matched_strings, rule_name)
        """
        filtered = False
        matched = []
        rule_name = ''
        for r in self._filter_rules:
            # NOTE(review): skip terms are only checked when at least one
            # rule is registered — preserved from the original behaviour.
            for st in self.skip_terms:
                if st.lower() == string_orig.lower():
                    return True, [st], 'skip terms'
            s_compare = s_end if r.compare_type > 0 else s_before
            if r.compare_type == 0:
                s_compare = text[:_head_text_window_size]
            elif r.compare_type == 100:
                s_compare = string_orig
            elif r.compare_type == -100:
                # cut-off rules: filter when an anchor text occurs in the
                # document before the annotation position
                in_cut_off = AnnRuleExecutor.cut_off_matching(text, r.reg_patterns, start)
                if in_cut_off:
                    filtered = True
                    matched.append('CUTOFF: %s' % r.name)
                    rule_name = r.name
                    return filtered, matched, rule_name
                else:
                    continue
            if more_context_sents is not None:
                if len(r.more_context_sents) > 0:
                    logging.debug('rule %s more context %s, on %s' % (r.name, r.more_context_sents, more_context_sents))
                # widen the comparison window with neighbouring sentences
                if -1 in r.more_context_sents and 'prev' in more_context_sents:
                    s_compare = '%s %s' % (more_context_sents['prev'], s_compare)
                if 1 in r.more_context_sents and 'next' in more_context_sents:
                    s_compare = '%s %s' % (s_compare, more_context_sents['next'])
                logging.debug('s_compare [%s]' % s_compare)
            for reg_p in r.reg_patterns:
                m = reg_p.match(s_compare)
                if m is not None:
                    matched.append(m.group(0))
                    rule_name = r.name
                    filtered = True
                    logging.debug('%s matched %s' % (s_compare, reg_p.pattern))
                    break
        return filtered, matched, rule_name

    def add_original_string_filters(self, regs):
        """Append regex strings that filter on the annotated string itself."""
        self._osf_rules += regs

    def execute_original_string_rules(self, string_orig):
        """
        filter the matching substring using patterns
        :param string_orig: the annotated string to test
        :return: (filtered, [[matched_text, pattern], ...])
        """
        s_compare = string_orig
        filtered = False
        matched = []
        for r in self._osf_rules:
            try:
                reg_p = re.compile(r)
            except Exception:
                # r is the pattern string itself; the original logged
                # r['regs'], which raises TypeError on a string
                logging.error('regs error: [%s]' % r)
                exit(1)
            m = reg_p.match(s_compare)
            if m is not None:
                matched.append([m.group(0), r])
                filtered = True
                break
        return filtered, matched

    def load_rule_config(self, config_file):
        """Load filter rules, original-string filters and skip terms from a JSON config."""
        rule_config = utils.load_json_data(config_file)
        r_path = rule_config['rules_folder']
        logging.debug('loading rules from [%s]' % r_path)
        for rf in rule_config['active_rules']:
            for r in utils.load_json_data(join(r_path, rf)):
                self.add_filter_rule(r['offset'], r['regs'], rule_name=rf,
                                     case_sensitive=r['case_sensitive'] if 'case_sensitive' in r else False,
                                     containing_pattern=r['containing_pattern'] if 'containing_pattern' in r else False,
                                     more_context_sents=r['more_context_sents'] if 'more_context_sents' in r else [])
            logging.debug('%s loaded' % rf)
        if 'osf_rules' in rule_config:
            for osf in rule_config['osf_rules']:
                self.add_original_string_filters(utils.load_json_data(join(r_path, osf)))
                logging.debug('original string filters from [%s] loaded' % osf)
        if 'skip_term_setting' in rule_config:
            self.skip_terms = utils.load_json_data(rule_config['skip_term_setting'])

    @staticmethod
    def cut_off_matching(text, anchor_texts, check_pos):
        """Return True when any anchor text occurs in text before check_pos
        (case-insensitive; anchors at position 0 are deliberately ignored,
        preserved from the original behaviour)."""
        for t in anchor_texts:
            pos = text.lower().find(t.lower())
            if check_pos >= pos > 0:
                return True
        return False
def test_filter_rules():
    """Smoke test: run context filter rules from the sample study config on a snippet."""
    t = """Sheet1
Application for Appointeeship/court
ACaCac clincic for check up
"""
    e = AnnRuleExecutor()
    # e.add_filter_rule(1, [r'.{0,5}\s+yes'], case_sensitive=False)
    e.load_rule_config('./studies/autoimmune.v3/sample_rule_config.json')
    # rules = utils.load_json_data('./studies/rules/negation_filters.json')
    # for r in rules:
    #     print r
    #     e.add_filter_rule(r['offset'], r['regs'], case_sensitive=True if 'case' in r and r['case'] is True else False)
    # print() form works on both Python 2 and 3 (bare print is Python 2 only)
    print('working on [%s]' % t)
    # the original call omitted the required start/end offsets and would
    # raise a TypeError; pass the location of the annotated string
    start = t.find('clincic')
    print(e.execute_context_text(t, 'ACaCac', ' clinic', 'clincic',
                                 start, start + len('clincic')))
def test_osf_rules():
    """Smoke test: run original-string filters from the sample study config."""
    t = "ADAD-A"
    e = AnnRuleExecutor()
    e.load_rule_config('./studies/autoimmune.v3/sample_rule_config.json')
    # rules = utils.load_json_data('./studies/rules/osf_acroynm_filters.json')
    # e.add_original_string_filters(rules)
    # print() form works on both Python 2 and 3 (bare print is Python 2 only)
    print(e.execute_original_string_rules(t))
# Manual entry point: run the filter-rule smoke test with debug logging.
if __name__ == "__main__":
    logging.basicConfig(level='DEBUG')
    test_filter_rules()
    # [s, e] = AnnRuleExecutor.relocate_annotation_pos("""
    # i am a very long string
    # with many characters, liver
    # such as Heptaitis C, LIver and Candy
    # """, 77, 15, 'liver')
    # print s, e
|
STACK_EDU
|
import { empty } from '../../utilities/typeGuards';
import { Token } from '../../models/token';
import { TokenType } from '../../models/tokentypes';
import { booleanType } from '../ksTypes/primitives/boolean';
import { integerType, doubleType } from '../ksTypes/primitives/scalar';
import { stringType } from '../ksTypes/primitives/string';
import { OperatorKind, IType, IParametricType } from '../types';
import { bodyTargetType } from '../ksTypes/orbital/bodyTarget';
import { vesselTargetType } from '../ksTypes/orbital/vesselTarget';
import { elementType } from '../ksTypes/parts/element';
import { listType } from '../ksTypes/collections/list';
import { aggregateResourceType } from '../ksTypes/parts/aggregateResource';
import { partType } from '../ksTypes/parts/part';
import { vesselSensorsType } from '../ksTypes/vessel/vesselSensors';
import { dockingPortType } from '../ksTypes/parts/dockingPort';
import { engineType } from '../ksTypes/parts/engine';
import { volumeItemType } from '../ksTypes/io/volumeItem';
import { volumeType } from '../ksTypes/io/volume';
import { kosProcessorFieldsType } from '../ksTypes/kosProcessorFields';
/**
 * Maps binary operator token types to the corresponding binary
 * operator kinds used by the type checker.
 */
export const binaryOperatorMap: Map<TokenType, OperatorKind> = new Map([
  [TokenType.minus, OperatorKind.subtract],
  [TokenType.multi, OperatorKind.multiply],
  [TokenType.div, OperatorKind.divide],
  [TokenType.plus, OperatorKind.plus],
  [TokenType.power, OperatorKind.power],
  [TokenType.less, OperatorKind.lessThan],
  [TokenType.lessEqual, OperatorKind.lessThanEqual],
  [TokenType.greater, OperatorKind.greaterThan],
  [TokenType.greaterEqual, OperatorKind.greaterThanEqual],
  [TokenType.and, OperatorKind.and],
  [TokenType.or, OperatorKind.or],
  [TokenType.equal, OperatorKind.equal],
  [TokenType.notEqual, OperatorKind.notEqual],
]);
/**
 * Maps unary operator token types to the corresponding unary
 * operator kinds.
 *
 * NOTE(review): TokenType.plus is mapped to OperatorKind.negate, the same
 * kind as TokenType.minus — presumably so unary plus resolves against the
 * same numeric-operand overloads as negate; confirm this is intended.
 */
export const unaryOperatorMap: Map<TokenType, OperatorKind> = new Map([
  [TokenType.not, OperatorKind.not],
  [TokenType.defined, OperatorKind.defined],
  [TokenType.minus, OperatorKind.negate],
  [TokenType.plus, OperatorKind.negate],
]);
/**
 * Maps the argument of a `list <target>` command to the element type of
 * the resulting list.
 *
 * NOTE(review): 'resources' maps to an applied list type while the other
 * entries map to bare element types — verify that asymmetry is intended
 * by the consumer of this map.
 */
export const listTypeMap: Map<string, IType> = new Map([
  ['bodies', bodyTargetType],
  ['targets', vesselTargetType],
  ['elements', elementType],
  ['resources', listType.apply(aggregateResourceType)],
  ['parts', partType],
  ['sensors', vesselSensorsType],
  ['dockingports', dockingPortType],
  ['engines', engineType],
  ['files', volumeItemType],
  ['fonts', stringType],
  ['volumes', volumeType],
  ['processors', kosProcessorFieldsType],
]);
/**
 * Retrieve the tracked type of the given token. Literal tokens map
 * directly to primitive types; any other token is resolved through its
 * attached symbol tracker, if one exists.
 * @param token token to retrieve the type for
 */
export const tokenTrackedType = (token: Token): Maybe<IType> => {
  if (token.type === TokenType.true || token.type === TokenType.false) {
    return booleanType;
  }

  if (token.type === TokenType.integer) {
    return integerType;
  }

  if (token.type === TokenType.double) {
    return doubleType;
  }

  if (
    token.type === TokenType.string ||
    token.type === TokenType.fileIdentifier
  ) {
    return stringType;
  }

  // Not a literal: fall back to the token's symbol tracker, if present.
  const { tracker } = token;
  if (empty(tracker)) {
    return undefined;
  }

  return tracker.getType({ uri: token.uri, range: token });
};
/**
 * Create a pass-through mapping between the single type parameter of a
 * parametric type and the single type parameter of its super type.
 * @param type the type to create a pass through for
 * @param superType the super type to pass the parameter through to
 * @throws Error if either type does not have exactly one type parameter
 */
export const passThroughTypeParameter = (
  type: IParametricType,
  superType: IParametricType,
): Map<IParametricType, IParametricType> => {
  const superTypeParams = superType.getTypeParameters();
  const typeParams = type.getTypeParameters();

  if (superTypeParams.length !== 1) {
    throw new Error(
      `Super type ${superType.name} does not have` +
        ` one type parameter but instead has ${superTypeParams.join(', ')}.`,
    );
  }

  // Bug fix: this message previously reported the super type's name and
  // parameters instead of the sub type's own (copy-paste error).
  if (typeParams.length !== 1) {
    throw new Error(
      `Type ${type.name} does not have` +
        ` one type parameter but instead has ${typeParams.join(', ')}.`,
    );
  }

  return new Map([[typeParams[0], superTypeParams[0]]]);
};
|
STACK_EDU
|
RowMappers.java created in different directory
Using 4.0-beta along with VertxGeneratorStrategy for code generation. The code executes fine but when I look at the generated DAO code, I see a reference to org.jooq.generated.tables.mappers.RowMappers class in the DAO constructor, which is missing. Upon debugging, I see that the class is being generated but in a different directory instead of honoring the target directory (relative path in my case "../generated/src/main/java") as specified in the configuration file - https://www.jooq.org/doc/3.10/manual/code-generation/codegen-advanced/codegen-config-target/. To be clear all other files (pojos, daos etc.) are being generated in the specified target directory. It's only the RowMappers.java that is not. I suspect the relative path is tripping the code during initialization of the target directory.
FYI - After a bit of debugging, I modified VertxGeneratorBuilder.withPostgresReactiveDriver() to handle relative paths as shown below.
ComponentBasedVertxGenerator.logger.info("Generate RowMappers ... ");
String packageName = base.getActiveGenerator().getStrategy().getJavaPackageName(schema) + ".tables.mappers";
String dir = base.getActiveGenerator().getStrategy().getTargetDirectory()
+ "/" + packageName.replaceAll("\\.", "/");
try {
File f = new File(dir).getCanonicalFile();
dir = f.getCanonicalPath();
if (! f.mkdir()) {
ComponentBasedVertxGenerator.logger.warn("Failed to create directory " + dir);
}
} catch (IOException e) {
ComponentBasedVertxGenerator.logger.warn(e);
}
File moduleFile = new File(dir, "RowMappers.java");
JavaWriter out = writerGen.apply(moduleFile);
out.println("package " + packageName + ";");
As a result the RowMappers.java file was generated. However it disappeared at the end of code generation. Upon further debugging, I found that there's a cleanup code in org.jooq.util.JavaGenerator.generate() lines 338-340 that deletes excess files. So somehow we've to flag to this code that this directory or file should not be deleted.
Finally I got it working with further digging around. Here's the workaround:
ComponentBasedVertxGenerator.logger.info("Generate RowMappers ... ");
String packageName = base.getActiveGenerator().getStrategy().getJavaPackageName(schema) + ".tables.mappers";
String dir = base.getActiveGenerator().getStrategy().getTargetDirectory();
dir = File.separator.equals("/") ? dir.replace("\\", File.separator) : dir.replace("/", File.separator);
dir = dir + File.separator + packageName.replace(".", File.separator);
File moduleFile = new File(dir, "RowMappers.java");
JavaWriter out = writerGen.apply(moduleFile);
out.println("package " + packageName + ";");
It was the canonical path in the earlier code that resulted in different file path strings that the cleanup code saw. All other paths looked like C:\Users\foo\development\codegen\..\generated\src\main\java\org\jooq\generated\xxx.java. However, with the canonical path, the ".." was resolved and hence it was an absolute path. The above code matches the pattern with other paths. Personally I feel this is a hack. Instead I think this is what is needed - After reading the target directory from the configuration file, resolve it to a canonical path and then call the GeneratorStrategy.setTargetDirectory() with the canonical path. Then all subsequent code can leverage the right value without the above hacks. So the right place for such a code seems to be in org.jooq.util.GenerationTool.run() line 523, which the JOOQ author(s) should look at.
That is a strange behavior indeed. Especially because I'm generating code with version 4.0.0-Beta in the tests plus in projects depending on vertx-jooq.
Can you share the configuration you are using? Also if you're doing it programmatically or using maven configuration.
Here's what I've in Maven i.e. basically pointer to the external configuration file.
<!-- jooq configuration -->
<configuration>
<configurationFile>src/main/conf/jooq-pg.xml</configurationFile>
</configuration>
And here's my jooq configuration, omitted the <jdbc> section. The custom generator strategy code is attached in issue #58
<generator>
<name>io.github.jklingsporn.vertx.jooq.generate.rx.RXReactiveVertxGenerator</name>
<database>
<name>org.jooq.util.postgres.PostgresDatabase</name>
<includes>.*</includes>
<excludes>
UNUSED_TABLE # This table (unqualified name) should not be generated
| PREFIX_.* # Objects with a given prefix should not be generated
| SECRET_SCHEMA\.SECRET_TABLE # This table (qualified name) should not be generated
| SECRET_ROUTINE # This routine (unqualified name) ...
</excludes>
<inputSchema>public</inputSchema>
<unsignedTypes>false</unsignedTypes>
<forcedTypes>
<!--Convert varchar column with name 'someJsonObject' to a io.vertx.core.json.JsonObject-->
<forcedType>
<userType>io.vertx.core.json.JsonObject</userType>
<converter>io.github.jklingsporn.vertx.jooq.shared.JsonObjectConverter</converter>
<expression>someJsonObject</expression>
<types>.*</types>
</forcedType>
<!--Convert varchar column with name 'someJsonArray' to a io.vertx.core.json.JsonArray-->
<forcedType>
<userType>io.vertx.core.json.JsonArray</userType>
<converter>io.github.jklingsporn.vertx.jooq.shared.JsonArrayConverter</converter>
<expression>someJsonArray</expression>
<types>.*</types>
</forcedType>
</forcedTypes>
</database>
<generate>
<!-- Generation flags: See advanced configuration properties -->
<javaTimeTypes>true</javaTimeTypes>
<daos>true</daos>
<fluentSetters>true</fluentSetters>
</generate>
<strategy>
<name>com.el.db.jooq.CodeGeneratorStrategy</name>
<!--<name>io.github.jklingsporn.vertx.jooq.generate.VertxGeneratorStrategy</name>-->
</strategy>
<target>
<directory>../generated/src/main/java</directory>
</target>
</generator>
Thanks for your help. I've added your snippet
ComponentBasedVertxGenerator.logger.info("Generate RowMappers ... ");
String packageName = base.getActiveGenerator().getStrategy().getJavaPackageName(schema) + ".tables.mappers";
String dir = base.getActiveGenerator().getStrategy().getTargetDirectory();
dir = File.separator.equals("/") ? dir.replace("\\", File.separator) : dir.replace("/", File.separator);
dir = dir + File.separator + packageName.replace(".", File.separator);
File moduleFile = new File(dir, "RowMappers.java");
JavaWriter out = writerGen.apply(moduleFile);
out.println("package " + packageName + ";");
with minor modifications. I'll draft a release once I've figured out why the latest pg-client has dropped support for JsonObject ;)
Thank you. But as I pointed out, this is truly a hack. While the code works for the scenario that I have, it is still very fragile. E.g. What if the target directory ends with a slash e.g. "../generated/src/main/java/". Now see the value of dir as the execution progresses. To avoid such pitfalls, we should avoid using file separators at all cost. Instead we should start with a canonical path and then use the File(dir, child) constructors where required. Hence my recommendation to bring this up with the jooq author(s). If that's a stretch or if it cannot be done for whatever reason, then perhaps consider resetting the target directory to a canonical path from your end before the code generation begins.
|
GITHUB_ARCHIVE
|
UML Use Case FAQ: Can you share an example of a Use Case diagram?
As I've been preparing to let other writers write on the devdaily.com website, I started to think about what different things (processes) an author would need to do on the website. As I thought about this, I started realizing that I was once again thinking like a business analyst, and then I thought I'd create an example UML Use Case diagram to show these processes.
Example UML Use Case diagram
Here then is a simple example UML Use Case diagram, demonstrating the use cases (or processes) that a writer/author will have on this website:
As you can see from this Use Case diagram, the role named "Author" will have the use cases shown in the "bubbles" in this diagram. After identifying each of these use cases, what I'd normally do next either write (a) some use case text or (b) user stories for each use case shown here. For instance, a simple "Log In" user story might be written like this:
The Author logs into the system using the username and password that were assigned when his account was created.
The Author will typically log in by going to the main user login page, but they can also be redirected to the login page any time they try to create new content, or edit existing content.
If the Author attempts to edit or delete content they aren't authorized to change, they will still be redirected to the login page, but they will not be allowed to edit or delete the content after the login.
If the user credentials can't be confirmed against the "users" data store, they will be redirected to the user login page for another login attempt, and the user may attempt to log in an unlimited number of times.
If the user's credentials are confirmed against the system's "users" data store (and assuming the user is assigned the Author role), they will be assigned the permissions of the Author role. Note that a user may have multiple roles, so the user may actually be an Author and Editor, and they will inherit all the permissions associated with all of these roles.
Conveys important information
As you can see, a User Story like this is simple, but still attempts to convey important information to all the reviewers of the story:
- The username and password were assigned to the Author at some earlier time, all of which implies that an Author is a User who has a special Author account in the system.
- There is a standard user login web page.
- There are alternate ways to get redirected to the login page.
- Login attempts are unlimited.
- Users may have multiple roles, and permissions associated with all those roles.
The designers of the website must agree to all these design decisions, and the system must be programmed to match these decisions.
In contrast to a simple User Story like this, more traditional Use Case text would be more formal, typically written as "The user does X", followed by "The system does Y" prose, with other sections added, such as Preconditions, Postconditions, and additional details.
Benefits of a UML Use Case diagram
The thing I like about a Use Case diagram like this is that, if you write your use case bubble text well, most people can understand a lot of what you mean by looking at the diagram.
Over the years a lot of customers and developers have made fun of my "stick figures", but I know that these Use Case diagrams helped us in many ways, including:
- Serving as a "talking point" diagram to keep meetings on track
- Finding missing use cases
- Identifying missing (and misunderstood) actors (user roles)
- Kicking off discussions of what each bubble really means
On that last bullet item in particular, a simple Use Case diagram like this can serve as the starting point for a discussion. In fact, when I'm meeting a new customer, or working on a new project, I'll either bring a diagram like this to the table, or we'll draw use case diagrams like these on a whiteboard. Of course we don't call them use case diagrams, we call them "Al's stick figures", but that doesn't matter, because we're talking about our software system in a language that everyone easily understands.
Using Use Case diagrams in requirements specifications
I typically write my software requirements specifications as a series of use cases, or possibly as XP "user stories" (if I know the customer well enough). I organize the use cases according to user roles (the "actors"), and begin each section with a summary diagram, like the Use Case diagram shown above. I think a figure like this serves as a nice introduction to what the requirements specification reader/reviewer is about to read.
A second idea is to include all these figures in a separate document, so users can "check off" each bubble after they've reviewed that use case. I mention this because I see reviewers check off these bubbles so often that I'm tempted to say this is a "best practice".
As a final note, please don't read into this that all you have to do is create a bunch of stick figures and bubbles and call that a software requirements specification. The devil is in the details, and the text you'll write in your use cases (or user stories) will really bring out the discussions. Agreement on a Use Case diagram is one thing, but agreement on the step by step detail of use case text is a completely different story.
Example UML Use Case diagram - summary
In my opinion, Use Case diagrams can be very helpful as a communication tool, in the ways I've discussed in this article. And because communication is all about the role a business analyst performs, using Use Case diagrams wherever they enhance communication is a good thing.
If you have an example Use Case diagram you'd like to share, just use the contact form above to send it to me, and I'll add it here, or create a new web page for it. Alternatively, I can create a user account for you, and let you write your own "Use Case diagram" article here.
|
OPCFW_CODE
|
it to him - but does not broadcast. The website creates a new version of Tx2 with nLockTime set to zero and the input sequence number set to uint_MAX, then he re-signs. Think about the thousands of counterfeit bills that are currently in circulation amongst the USD? Final Words Bitcoin is an amazing new technology which can revolutionize the world. The main challenge of regulating anything that has such a huge impact on the wealth of a specific location is to create effective solutions while not hindering the development of wealth, improving companies and businesses which have an impact on the said specific location. The return value is an output: an amount of value and an address owned by the grandson. In fact, there are roughly 30,000 full nodes for Bitcoin. The latter protocol that relies on transaction replacement is more flexible because it allows the value allocated to go down as well as up during the lifetime of the channel as long as the client receives signatures from the server, but for many use cases. Thats als dtudent geld verdienen right! If Bitcoin Is So Secure, Why Do I Have To Wait For A Confirmation?
How, bitcoin, works 99, bitcoins
Welchespiele kostenlos bitcoin gewinnen
Sec bitcoin etf reject
You can send 200,000 BTC or only 2 BTC and the fee for the transaction will be the same. Be sure to download our free and awesome m Wallet to take your Bitcoin experience to the next level! Defeating this requires someone to be able to interfere with the execution of a program that may run entirely on the CPU, even in extreme cases without any data traversing the memory bus (you can run entirely using on-die cache if the program is small. Miners are rewarded in the form of receiving cryptocurrency payments for the services they provide. Since all of these transactions are Bitcoin network based, the fees are much lower than those of Credit Card networks and other financial institutions like PayPal. Including this short and simple message in the code made it very transparent that the first block was mined no earlier than that date. The problem with this statement is that there have been plenty of new and seemingly revolutionary inventions in the past that didnt take off at all, or took off for a minute or two and then sunk like a rock in a lake. This number is halved roughly every 4 years. Escrow mechanisms can be implemented to protect buyers. The same could be said about any other currency, and the answer. Nakamoto points out that honest nodes in the network need to collectively possess more CPU power than an attacker. By combining these flags together, we are able to create a signature that is valid even when other inputs are added, but breaks if the outputs or other properties of the transaction are changed.
If bitcoin drops 10, Transaction bitcoin blockchain read, Wallet bitcoin test, Tipeeestream bitcoin,
|
OPCFW_CODE
|
Man, it's been a hectic week and I can't wait to start the weekend. Instead of posting the latest indie band everybody else is posting about two hours later, I thought I'd start off your weekend with a smile by sharing my favourite Craigslist ad of all time. It's about a dead bird in a mailbox and somehow, it makes me happy every time I read it.
who put the dead bird in my mailbox? - w4m
Date: 2008-04-20, 12:56PM EDT
a) how did you get into my mailbox in the first place, it is locked b) did you kill the bird c) it died horribly, that much was clear d) you're psycho e) do I know you f) if I do know you I don't want to know you g) if I don't know you, what did I do to inspire you to put a dead bird in my mailbox h) I don't know how to disinfect a mailbox from a dead bird, I'm worried about diseases and have used five different kinds of cleaner but still feel like the bird's still in there still and like my bills and my catalogues and my coupons have dead bird on them i) it was a hummingbird, I looked it up - they don't even live in New York - this is so f*ing psycho, I can't believe this
(Yes, it goes all the way down to z), if you're not laughing at y), you need a doctor to inject some marrow into your funny bone.)
j) are you the mailman? k) I'm always nice to the mailman l) the super didn't care when I told him what happened m) the neighbors didn't care either n) do you have some kind of problem with birds o) don't put anything else in my mailbox p) unless it's an apology q) no, I take that back, I don't even want an apology r) what am I supposed to do with this bird - it's in bubblewrap in a bag in a shoebox in the freezer right now - am I supposed to bury it - where? how? in a construction site where they've jackhammered through the concrete - where is a person supposed to bury things in this city? s) I could drop it in the Gowanus canal, but that seems undignified t) I could drop it in the ocean, but the ocean is so big and it is such a small bird u) I could drop it in the toilet but it would probably get stuck v) I hear this whirring around my ears every time I go to the mailbox and I'm pretty sure it's ghost bird, and I'm all "it wasn't me that killed you, bird!" but still the whirring doesn't go away until I get to the stairwell w) am I supposed to eat it - maybe you were trying to feed me - don't you know I'm a vegetarian x) if this was Ricky, I'm gonna beat your ass, mama told you stop bothering the zoo y) if this was Gina, I'm sorry, I'm sorry, how many times I gotta say I'm sorry z) I could drop it off the roof, maybe it will reincarnate while falling and I can start reading my mail again
|
OPCFW_CODE
|
// Summary fields populated on the final (answer) step.
let nameValue = document.querySelector("#nameValue");
let colorValue = document.querySelector("#colorValue");
let carValue = document.querySelector("#carValue");
let petValue = document.querySelector("#petValue");
// NOTE(review): querySelectorAll with an id selector implies several
// elements share the same id, which is invalid HTML — presumably one
// next/prev button per step panel; confirm against the markup.
const nextBtn = document.querySelectorAll("#nextBtn");
const prevBtn = document.querySelectorAll("#prevButton");
const yesBtn = document.querySelector("#yesBtn");
const noBtn = document.querySelector("#noBtn");
const answerStep = document.querySelector("#answer");
// Progress indicator elements, one per step.
const stepFlow = document.querySelectorAll(".stepFlow");
// Current wizard step, 1-based; step 5 is the answer summary.
let formStep = 1;

// Wire every next/prev button to the step navigation handlers.
nextBtn.forEach(function(fbutton) {
  fbutton.addEventListener("click", stepForward);
});
prevBtn.forEach(function(bbutton) {
  bbutton.addEventListener("click", stepBackward);
});
// Advance the wizard one step: hide the current panel, update the progress
// indicator, and reveal the next panel. On reaching step 5 the collected
// answers are copied into the summary view instead of showing a panel.
function stepForward() {
  if (formStep >= 5) {
    // Already on the final (answer) step; nothing to advance to.
    return;
  }

  document.querySelector(`.step${formStep}`).classList.add("hidden");
  stepFlow[formStep - 1].classList.remove("activeStep");
  formStep++;
  stepFlow[formStep - 1].classList.add("activeStep");

  // Steps 1-4 have their own panels; the original condition
  // `formStep < 4 || formStep === 4` is simply `formStep <= 4`.
  if (formStep <= 4) {
    document.querySelector(`.step${formStep}`).classList.remove("hidden");
  }

  if (formStep === 5) {
    // Populate the summary with the values entered in the previous steps.
    nameValue.innerText = document.querySelector("#name").value;
    colorValue.innerText = document.querySelector("#color").value;
    carValue.innerText = document.querySelector("#car").value;
    petValue.innerText = document.querySelector(
      'input[name="pet"]:checked'
    ).value;
    answerStep.classList.remove("hidden");
  }
}
// Move the wizard back one step, restoring the previous panel and keeping
// the progress indicator in sync. Returning to step 4 also re-reveals the
// answer summary.
function stepBackward() {
  const canGoBack = formStep > 1 && formStep < 5;
  if (!canGoBack) {
    return;
  }

  document.querySelector(`.step${formStep}`).classList.add("hidden");
  stepFlow[formStep - 1].classList.remove("activeStep");
  formStep--;
  stepFlow[formStep - 1].classList.add("activeStep");
  document.querySelector(`.step${formStep}`).classList.remove("hidden");

  if (formStep === 4) {
    answerStep.classList.remove("hidden");
  }
}
// Swap the main content for one of the two easter-egg views depending on
// which final-answer button was clicked.
function revealEasterEgg(eggSelector) {
  document.querySelector(".main-content").classList.add("hidden");
  document.querySelector(eggSelector).classList.remove("hidden");
}

yesBtn.addEventListener("click", () => revealEasterEgg("#easter-egg1"));
noBtn.addEventListener("click", () => revealEasterEgg("#easter-egg2"));
|
STACK_EDU
|
Why do overstretched horizontal lines get introduced in this table with acmart class
I have this table, in which I want to report some results and I am using acmart class. One extra column keeps getting introduced. Here is my code:
\documentclass[sigconf]{acmart}
\usepackage[utf8]{inputenc}
\usepackage{placeins}
\usepackage{booktabs,subcaption,amsfonts,dcolumn}
\title{table example}
\date{June 2020}
\begin{document}
\maketitle
\section{Introduction}
\begin{table*}[t]
\large
\begin{tabular*}{\textwidth}{|l|l|l|}
\toprule
Model & Accuracy1 & Accuracy2 \\ \midrule
Model1&0.421& 18.47 \\
Model2 &0.46& 23.62\\
Model3&0.49 &25.7 \\
\bottomrule
\end{tabular*}
\caption{Caption}
\label{tab: tab1}
\end{table*}
\end{document}
I get stretched horizontal lines. Why is that? How can I centrally align this table with proper horizontal rules?
You can try to use tabular environment (without a star at the end) and delete the {\textwidth} part. There are examples here: https://tex.stackexchange.com/questions/341205/what-is-the-difference-between-tabular-tabular-and-tabularx-environments.
Do you really want the table to be centered with respect to both columns of the text? From your screenshot I suspect the table actually is narrow enough to fit into a single column. If you want to center the table within one column you can use \begin{table} \centering.
Please also be warned that the horizontal lines from the booktabs package are designed to be used without vertical lines. This is why you see small gaps in the vertical lines around the intersections.
The tabular* is clumped together on the left because it had a required total width of \textwidth, but the columns have no ability to stretch out to fill the width. The basic answer is to put \extracolsep{\fill} in the tabular declaration, but that makes its own different ugly result.
The vertical lines give trouble in two ways: (1) The booktabs definitions don't work with them, leaving gaps at the horizontal rules; (2) the \extracolsep problem is that the spaces stretch only on one side of each vertical line.
Before launching into messy solutions, let me exhort others to post answers using tabularx perhaps using decimal alignment; or using even better tabular layout packages that do it elegantly!
First try: natural width without vertical lines -- Would this be reasonable? Some \quad spacing is inserted to make the wider column headings be centered over the columns
\centering
\begin{tabular}{lll}\toprule
Model & Accuracy1& Accuracy2 \\ \midrule
Model1&\quad 0.421&\quad 18.47 \\
Model2 &\quad 0.46&\quad 23.62\\
Model3 &\quad 0.49 &\quad 25.7 \\ \bottomrule
\end{tabular}
Second try: stretched width without vertical lines -- This repeats the manual spacing with \quad to center the heads over their columns, but alternatives are possible, as done further down.
\begin{tabular*}{\textwidth}{@{\qquad\extracolsep{\fill}}lll@{\qquad}}\toprule
Model & Accuracy1& Accuracy2 \\ \midrule
Model1&\quad 0.421&\quad 18.47 \\
Model2 &\quad 0.46&\quad 23.62\\
Model3&\quad 0.49 &\quad 25.7 \\ \bottomrule
\end{tabular*}
Third try, ruled tabular at natural width -- One must forego the extra spacing around the horizontal lines done by booktabs, but the result of that is too ugly to tolerate (\extrarowheight does not solve the problem). Let's define some "struts" to enforce extra spacing around \hline
\newcommand\highstrut{\leavevmode\raise\jot\copy\strutbox}
\newcommand\deepstrut{\leavevmode\lower\jot\copy\strutbox}
Then use them in a ruled tabular above and below any \hline
\centering
\begin{tabular}{|l|l|l|}
\hline \highstrut
Model & Accuracy1 & Accuracy2\deepstrut \\ \hline
\highstrut
Model1 &\quad 0.421&\quad 18.47 \\
Model2 &\quad 0.46&\quad 23.62\\
Model3 &\quad 0.49 &\quad 25.7 \deepstrut\\ \hline
\end{tabular}
Fourth try: stretched with vertical lines -- where the spacing hits the fan. Since the only flexible spacing is between columns, and the rules are put in the column, one can't have equal spacing around the vertical lines unless they are placed in separate columns, and for that you need to abandon LaTeX's | and use special "rule" columns like r@{\vline}. Note the double & signs! Think of them as representing the |. Also, as long as there are large spaces between the columns, the headings can be centered over the (narrower) columns by letting them overhang the columns (\hidewidth plus ~).
\begin{tabular*}{\textwidth}{@{}
r@{\vline\extracolsep{\fill}} l
r@{\vline}l r@{\vline}l r@{\vline\extracolsep{0pt}}}
\hline
\highstrut & Model && \hidewidth~ Accuracy1\hidewidth~ &&
\hidewidth~ Accuracy2\hidewidth~ &\deepstrut \\ \hline
\highstrut & Model1 && 0.421 && 18.47 &\\
& Model2 && 0.46 && 23.62&\\
& Model3 && 0.49 && 25.7 &\deepstrut\\
\hline
\end{tabular*}
|
STACK_EXCHANGE
|
We’re excited to announce, webmasters will have more tools than ever to control the snippets that preview their site on the Bing results page.
For a long time, the Bing search results page has shown site previews that include text snippets, image or video. These snippets, images or videos preview are to help users gauge if a site is relevant to what they’re looking to find out, or if there’s perhaps a more relevant search result for them to click on.
The webmasters owning these sites have had some control over these text snippets; for example, if they think the information they’re providing might be fragmented or confusing when condensed into a snippet, they may ask search engines to show no snippet at all so users click through to the site and see the information in its full context. Now, with these new features, webmasters will have more control than ever before to determine how their site is represented on the Bing search results page.
Letting Bing know about your snippet and content preview preferences using robots meta tags.
We are extending our support for robots meta tags in HTML or X-Robots-Tag tag in the HTTP Header to let webmasters tell Bing about their content preview preferences.
Specify the maximum text-length, in characters, of a snippet in search results.
<meta name="robots" content="max-snippet:400" />
- If value = 0, we will not show a text snippet.
- If value = -1, there is no snippet length limit.
- max-image-preview:[value]Specify the maximum size of an image preview in search results.
<meta name="robots" content="max-image-preview:large" />
- If value = none, Bing will not show an image preview.
- If value = standard, Bing may show a standard size image.
- If value = large, Bing may show a standard or a large size optimized image.
- If value is not none and not standard and not large, there is no image length limit.
Specify the maximum number of seconds (integer) of a video preview in search results.
<meta name="robots" content="max-video-preview:-1" />
- If value = 0, Bing may show a static image of the video.
- If value = -1, you allow any preview length.
Please note that the NOSNIPPET meta tag is still supported and the options above can be combined with other meta robots tags.
Example by setting
<meta name="robots" content="max-snippet:-1, max-image-preview:large, max-video-preview:-1, noarchive" />
webmasters tell Bing that there is no snippet length limit, a large image preview may be shown, a long video preview may be shown and link to no cache page should be shown.
Over the following weeks, we will start rolling out these new options first for web and news, then for images, videos and our Bing answers results. We will treat these options as directives, not as hints.
For more information, please read our documentation on meta tags.
Please reach out to Bing webmaster tools support if you face any issues or questions.
Principal Program Manager
Microsoft - Bing
|
OPCFW_CODE
|
using AirTicketSales.Models;
using System;
using System.Collections.Generic;
using System.Text;
namespace AirTicketSales
{
class Ticket : Base
{
    // Running count of created tickets; the next ticket's Id.
    static int counter = 0;

    public Ticket()
    {
        Id = ++counter;
        // Short human-friendly code: first 8 hex chars of a fresh GUID ("n" = no dashes).
        TicketNumber = Guid.NewGuid().ToString("n").Substring(0, 8);
    }

    public string TicketNumber { get; set; }
    public FromCity FromCity { get; set; }
    public ToCity ToCity { get; set; }
    public Passenger Passenger { get; set; }
    public Flight Flight { get; set; }
    public int Seat { get; set; }
    public string Gate { get; set; }

    public override string ToString()
    {
        // Compose the printable ticket summary in one interpolated string,
        // then append it with a trailing newline via the builder.
        string summary =
            $"************{Id}************\n" +
            $"------------Ticket No:| {TicketNumber} \n" +
            $"------------From: | {FromCity} \n" +
            $"------------From to: | {ToCity} \n" +
            $"------------Passenger:| {Passenger}\n" +
            $"------------Flight: | {Flight.Name}\n" +
            $"------------Seat:| {Seat}\n" +
            $"------------Enter: | {Gate}";

        var output = new StringBuilder();
        output.AppendLine(summary);
        return output.ToString();
    }
}
}
|
STACK_EDU
|
Access Denied Windows 7
Access Denied... In Server Manager, click the Manage menu, and then click Add Roles and Features. You cannot delete a file or a folder on an NTFS file system volume This article describes why you may not be able to delete a file or a folder on Please install from the Microsoft Internet... http://itivityglobal.com/access-denied/access-denied-windows-10.html
Peter Bruzzese Andy Grogan Nuno Mota Henrik Walther Neil Hobson Anderson Patricio Jaap Wesselius Markus Klein Rui Silva Ilse Van Criekinge Books Hardware Mail Archiving Load Balancing Message Boards Migration Section Configuration SampleThe following sample illustrates several security-related configuration settings in the
Access Denied Windows 7
Unable to read from filename, it is opened by someone else.... Invalid filename.... If the SSL certificate is not in available in the bindings list then proceed with the below instructions to set the appropriate permissions.
Service Control Manager Access is denied error 14. "Service Control Manager Cannot be Opened.Access Denied" 15. You can remove, clear, or override these rules by configuring more specific rules for your sites or URLs. To do so, use the following steps. WINSOCK Error: EUSERS returned....
Creating a new home dire... Access Denied 403 IIS defines two types of authorization rules, Allow rules and Deny rules: Allow rules let you define the user accounts or user groups that can access a site, an application, or WINSOCK Error: Network is down.... Do you want to stop the service?...
Your receiving this message is by design. For Microsoft Windows Vista x64 you should have Microsoft Windows Installer 4.5 installed. Microsoft Peer Web Services setup was interrupted before your new software ... WINSOCK Error: Address family not supported by protocol family....
Access Denied 403
Secure Channel Required - This Virtual Directory requires a browser that su... http://www.printmanager.com/cms.php?aid=93&fullpage=1&support=2 Manage Your Profile | Site Feedback Site Feedback x Tell us about your experience... Access Denied Windows 7 Free hard disk space: 1GB. Error 5-Access denied to Remote Access Service Manager 2.
This can occur if the permissions on the service account that is used to start the service have been changed or does not have permissions to the Windows registry keys used have a peek at these guys Unexpected file format.... Web Console Web Server: Apache 2.2.13 and above, 32 bit (for Windows); Apache 2.2.13 and above, 32/64 bit (for Linux). Operating systems: Microsoft Windows Server 2003 SP2 (all editions); Windows Small WINSOCK Error: Version is not supported....
Microsoft Windows XP Professional x64 SP2 and above; Microsoft Windows Vista Business / Enterprise / Ultimate SP1 and above; Microsoft Windows Vista Business / Enterprise / Ultimate x64 SP1 with all actual Compatibility Version Notes IIS 10.0 The
This will open the DCOMCNFG window. Unable to connect to servers via Terminal Services Manager I can't connect to 3 2003 servers via Terminal Services Manager, I receive the error "Unable to authenticate to this server" These The number of available user licenses for the Internet Information Services h...
Access to filename was denied.
This can occur for a number of reasons, including network issues, whether the file or folder has been shared, and the share permissions might not be configured correctly. Microsoft Internet Explorer 10.0 and later (under Windows 8). Child Elements Element Description add Optional element.Adds an authorization rule to the collection of authorization rules. This commits the configuration settings to the appropriate location section in the ApplicationHost.config file.
You cannot view, access, or load some webpages in Internet Explorer 8 or Internet Explorer 9 Beta This automated troubleshooter fixes issues related to webpages not loading or displaying as expected great! Please enter no more than number characters.... this content Processor: 1 GHz or better. 1.4 GHz minimum for a 64 bit OS.
Setup cannot load "inetstp.inf". Anonymous logon request received from user name at host computer name .... Microsoft Gopher Publishing Service is running. Proxy authentication required...
If you are installing some programs over the internet, the installation may fail with an “access denied” error if Internet Explorer determines that the web site is not a trusted site. WINSOCK Error: Too many open files.... VirtualizationAdmin.com The essential Virtualization resource site for administrators.
|
OPCFW_CODE
|
Floating Point library in m68k Assembler on Amiga
09 August 2018
Part 1 - some theory
Someone told me lately: “If you haven’t developed a floating-point library, go home and do it. It’s a nice weekend project.”
I followed this advice.
I must say, it took longer than a weekend. :) But it was a great experience to see how those numbers are generated and handled, and how they 'jitter' at the last bits of precision.
The Amiga offers a number of programming languages, including C/C++ and more high level languages like Pascal or Oberon, and some Basic dialects like AMOS, BlitzBasic and others.
(I’m posting the full assembler source code at the end of the post.
As the first part of this blog I’d like to write a little about the theory of floating-point numbers.
One of the floating point standards is IEEE 754.
The sign is pretty clear, it says whether the number is positive or negative.
The 8 bit exponent basically encodes the ‘floating-point’ shift value to the left and right.
The 23 bit mantissa combines the integer part of the floating-point number and the fraction part.
The integer part in the mantissa can go through a ‘normalisation’ process, which means that the first ‘1’ in a binary form of the number matters. And everything before that is ignored, considering the number is in a 32 bit register.
Let’s take the number 12.45.
That is how it would be stored in the mantissa.
There is more to it; read up on it here if you want: https://en.wikipedia.org/wiki/IEEE_754
Part 2 - the implementation - dec2bin (decimal to binary)
We make a few slight simplifications to the IEEE 754 standard so that this implementation is not fully compliant.
Now, how does it work in practice to get a decimal number into the computer as IEEE 754 representation.
Say, the number is:
Converting the integer part into binary form is pretty trivial. We just copy the value
As next step we have to calculate the bit length of that number because it is later stored in the exponent.
Here is the assembler code for that:
; d0 copied to d6 ; if int_part (d6) = 0 then no need to do anything cmpi.l #0,d6 beq .loop_count_int_bits_end ; now shift left until we find the first 1 ; counter in d2 .loop_count_int_bits btst.l #$1f,d6 ; bit 32 set? bne.s .loop_count_int_bits_done addq #1,d2 ; inc counter lsl.l #1,d6 bra .loop_count_int_bits .loop_count_int_bits_done move.l #32,d3 sub.l d2,d3 ; 32 - 1. bit of int move.l d3,d2 .loop_count_int_bits_end
In register d2 is the result, the bit length of the integer part.
The fraction part is a little more tricky. Bringing it into a binary form requires some thought.
I found that an algorithm that translates the fraction into binary form depends on the number of digits.
This loop can be repeated until there are no more bits in the fraction part. Or, the loop only repeats for the number of „free“ fraction bits left in the mantissa.
The threshold value, 5000 here, depends on the number of digits of the fraction part.
Here is the code to convert the fraction into binary value:
; now prepare fraction in d1 .prepare_fract_bits ; the algorithm is to: ; check if d1 > 5000 (4 digits) ; if yes -> mark '1' and substract 5000 ; if no -> mark '0' ; shift left (times 2) ; repeat until no more available bits in mantisse, which here is d3 move.l #5000,d4 ; threshold .loop_fract_bits subi.l #1,d3 ; d3 is position of the bit that represents 5000 clr.l d6 cmp.l d4,d1 blt .fract_under_threshold sub.l d4,d1 bset d3,d6 .fract_under_threshold or.l d6,d7 lsl.l #1,d1 ; d1 * 2 cmpi.l #0,d3 ; are we done? bgt .loop_fract_bits .prepare_fract_bits_end
The above code positions the fraction bit directly into the output register d7. And only so many bits are generated as there is space available in the mantissa.
Now we have the mantissa complete.
What’s missing is the exponent.
; at this point we have the mantissa complete ; d0 still holds the source integer part ; d2 still holds the exp. data ; (int part size, which is 0 for d0 = 0 because we don't hide the 'hidden bit') ; d7 is the result register ; all other registers may be used freely ; if d0 = 0 goto end cmpi.l #0,d0 beq .prepare_exp_bits_end .prepare_exp_bits ; Excess = 127 move.l #127,d0 ; we don't need d0 any longer add.l d2,d0 ; size of int part on top of excess move.l #23,d3 lsl.l d3,d0 ; shift into right position or.l d0,d7 .prepare_exp_bits_end
Notice, there is a special case. If the integer part is 0, delivered in d0, then we’ll make the exponent 0, too.
That’s basically it for the decimal to binary operation.
Test code for that is straight forward.
; dec2bin test code move.l #12,d0 ; integer part => 1010 move.l #4500,d1 ; fract part ; subroutine expects d0, d1 to be filled ; result: the IEEE 754 number is in d7 bsr dec2bin move.l #%01000001111000111001100110011001,d3 ; this what we expect cmp.l d3,d7 beq assert_pass move.l #1,d3 bra assert_end assert_pass move.l #0,d3 assert_end illegal ;include ; include "dec2bin.i"
The test code compares the subroutine output with a manually setup binary number that we expect.
Part 3 - the implementation - bin2dec (binary to decimal)
We want to convert back from the binary float number to the decimal representation with the integer part (with sign) and the fraction part in separate output registers.
In register d0 we expect the floating point number as input.
Let’s start extracting the exponent, because we need to get the integer part bit length that is encoded there.
We’ll make a copy of the input register where we operate on, because we mask out everything but the exponent bits.
.extract_exponent move.l d0,d1 andi.l #$7f800000,d1 ; mask out all but exp move.l #23,d2 lsr.l d2,d1 ; right align ; if int part = 0 cmpi.w #0,d1 beq .extract_sign subi.w #127,d1 ; d1 is now the size of int part
As next step we’ll extract the integer part bits.
.extract_mantisse_int move.l d0,d2 ; copy andi.l #$007fffff,d2 ; mask out all but mantisse move.l #23,d3 sub.l d1,d3 ; what we figured out above (int part size) lsr.l d3,d2 ; right align move.l d2,d6 ; result ; d6 now contains the int part
We also have to extract the sign bit and merge it with the integer part in register d6.
As next important and more tricky step is converting back the fraction part of the mantissa into a decimal representation.
First we have to extract the mantissa bits again, similarly as we did in the last step.
What do the ‘1’ bits in the fraction mantissa represent?
I.e.: assuming those bits:
Now, if each ‘1’ represents 5000 we have the following:
Here is the code:
clr.l d7 ; prepare output clr.l d1 ; used for division remainder move.l #1,d4 ; divisor (1, 2, 4, 8, ... ; equivalent to 2^-1, 2^-2, 2^-4, ...) .loop_fract subi.l #1,d2 ; d2 current bit to test for '1' lsl.l #1,d4 ; divisor - multiply by 2 on each loop cmpi.w #0,d4 ; loop end? if 0 we shifted out of the word boundary beq .loop_fract_end btst.l d2,d3 ; if set we have to devide beq .loop_fract ; no need to devide if 0 move.l #5000,d5 ; we devide 5000 add.l d1,d5 ; add remainder from previous calculation divu.w d4,d5 ; divide clr.l d6 ; clear for quotient add.w d5,d6 ; copy lower 16 bit of the division result (the quotient) lsl.l #1,d6 ; *2 add.l d6,d7 ; accumulate the quotient and.l #$ffff0000,d5 ; the new remainder move.l #16,d1 ; number of bits to shift remainder word lsr.l d1,d5 ; shift move.l d5,d1 ; copy new remainder bra .loop_fract .loop_fract_end
If we look at the
Let’s add a test case.
; test code for dec2bin2dec ; move.l #12345,d0 ; integer part => 1010 move.l #5001,d1 ; fract part ; subroutine expects d0, d1 to be filled ; result: the IEEE 754 number is in d7 bsr dec2bin move.l d7,d0 ; input for the back conversion bsr bin2dec cmpi.l #12345,d6 bne error cmpi.l #5001,d7 bne error moveq #0,d0 ;success illegal error moveq #1,d0 ;error illegal include "dec2bin.i" include "bin2dec.i"
Since we have now both operations, we can use dec2bin and bin2dec in combination.
We provide input for dec2bin, then let the result run through bin2dec and compare original input to the output.
I must say that there is indeed a precision loss. The last (fourth) digit can be off by up to 5, so we have a precision loss of up to 5 thousandths.
That can clearly be improved. But for this little project this result is acceptable.
In the next „parts“ I’d like to implement operations for addition, subtraction, division and multiplication.
Here are the sources: m68k-fp-lib on GitHub
|
OPCFW_CODE
|
The Alpha release of Fedora Core 16 has been released with Gnome 3.1.4 and it is not impressing anyone so far, the Gnome 3/Unity desktop is a step backwards compared to the Gnome 2.30 desktop that it is gradually replacing. The CPU and graphics grunt required to work compared to Gnome 2 and Xfce is much greater, and even if you installed the KDE 4.7 desktop that is not much better either. The Gnome 3 desktop uses GTk 3 with all new Cascading Style Sheet styled configuration files for the themes instead of the XML used in the older GTK 2 themes. I have tried Gnome 3, Unity and KDE 4.6 and I am not very happy with any of them, Gnome 2 on GNU/Debian Linux 6.0 is far faster and more reliable than the newer desktop that needed far more work done on it before we saw it first in Ubuntu 11.04 and also in the Fedora 15 distribution that included the Gnome 3 desktop, with far less customisation options than the Gnome 2 desktop that has thousands of themes available for it on Gnome-look.org. You are better off running Gnome 2 or Xfce for your daily desktop usage, as it seems that Gnome is going downhill these days.
The Gnome 1.0 and then the Gnome 2 releases where awesome, but now they just want to create something that is inferior to the excellent desktop environments they have created before? But as I said, you can just run Xfce or Lxde and run a very fast desktop without any bloat and wastage. Many of those who have tried the Fedora 16 desktop have gone back to Windows 7, as that is a more intuitive and useful user interface than the horrible Gnome 3/Unity example. The Linux desktop is going downhill and we need to do something before every distribution goes down this road. The Gnome 1.0 environment was very good compared to the Windows `98 operating systems desktop from Microsoft corporation. The menu was very intuitive and the whole experience of using it felt very natural, just like Windows `95 in many ways. Even the Windows `95 user interface is better than what the Gnome team are putting us through right now, I can not believe that they think this is a good idea to move away from the relatively sleek and powerful Gnome 2 interface they developed and moving to Gnome 3 that is far inferior in every way.
The Unity interface is just copying the Mac OS X desktop environment and not coming up with anything that is original at all. I thought that since Linux had the Windowmaker, Blackbox and Fvwm window managers, we could create something unique, but sadly they just want to copy everyone else instead of simply updating the older and better Gnome 2.
|
OPCFW_CODE
|
import json
import typing as t
from operator import methodcaller, attrgetter
from unittest import mock
from dataclasses import dataclass, field
from toolz import compose
import snug
from snug import Request
@dataclass
class Post:
    """Minimal stand-in for a blog-post resource used throughout the tests."""
    # Unique identifier of the post.
    id: int
    # Human-readable title.
    title: str
@dataclass
class Comment:
    """Minimal stand-in for a comment resource used in the nested-query test."""
    # Unique identifier of the comment.
    id: int
    # Comment body text.
    text: str
@dataclass
class MockSender:
    """Test double for a request sender.

    Holds a registry of canned responses and, when called with a request,
    returns the response registered for an equal request.
    """
    # List of (request, response) pairs, matched by request equality.
    # Fix: the original annotation was ``field(default_factory=dict)`` -- a
    # dataclasses.Field object used (incorrectly) as a type annotation; the
    # field was still required and the actual usage is a list of pairs.
    responses: list

    def __call__(self, request):
        """Return the response registered for ``request``.

        Raises:
            LookupError: if no registered request equals ``request``.
        """
        try:
            return next(resp for req, resp in self.responses
                        if req == request)
        except StopIteration:
            raise LookupError(f'no response for {request}')
def test_querylike():
    """Query must satisfy the Querylike interface."""
    conforms = issubclass(snug.Query, snug.Querylike)
    assert conforms
class TestQuery:
    """Behavioural tests for ``snug.Query``: subclassing, direct
    construction, nesting, and use as a decorator."""

    def test_subclassing(self):
        # Subclassing Query with an ``rtype`` class keyword should record
        # the expected return type; the subclass supplies ``__req__``.
        @dataclass(frozen=True)
        class posts(snug.Query, rtype=t.List[Post]):
            count: int

            @property
            def __req__(self):
                return Request('posts/', params={'max': self.count})

        query = posts(count=2)
        assert isinstance(query, snug.Query)
        assert query.count == 2
        assert query.__rtype__ is t.List[Post]
        assert query.__req__ == snug.Request('posts/', params={'max': 2})

    def test_subclassing_defaults(self):
        # Without an explicit ``rtype``, the return type defaults to object.
        class posts(snug.Query):
            @property
            def __req__(self):
                return Request('posts/')

        assert posts.__rtype__ is object

    def test_init(self):
        # Query may also be instantiated directly from a request + rtype.
        recent_posts = snug.Query(request=snug.Request('posts/recent/'),
                                  rtype=t.List[Post])
        assert isinstance(recent_posts, snug.Query)
        assert recent_posts.__req__ == snug.Request('posts/recent/')
        assert recent_posts.__rtype__ is t.List[Post]

    def test_init_defaults(self):
        # The rtype argument is optional and defaults to object.
        recent_posts = snug.Query(snug.Request('posts/recent/'))
        assert recent_posts.__req__ == snug.Request('posts/recent/')
        assert recent_posts.__rtype__ is object

    def test_nested(self):
        # A Query class declared inside another query class; instances of
        # the outer query expose a bound version of the inner one.
        @dataclass(frozen=True)
        class post(snug.Query, rtype=Post):
            """a post by its ID"""
            id: int

            @dataclass(frozen=True)
            class comments(snug.Query, rtype=t.List[Comment]):
                """comments for this post"""
                post: 'post'
                sort: bool
                count: int = 15

        assert 'post.comments' in repr(post.comments)
        assert issubclass(post.comments, snug.Query)

        post34 = post(id=34)
        # Calling ``comments`` on an instance fills in the ``post`` field
        # automatically (presumably via descriptor binding in snug -- confirm).
        post_comments = post34.comments(sort=True)
        assert isinstance(post_comments, snug.Query)
        assert post_comments == post.comments(post=post34, sort=True)

    def test_as_decorator_with_type(self):
        # ``snug.Query(rtype)`` may decorate a request-returning function,
        # producing a Query subclass with that return type.
        @snug.Query(Post)
        def post(id: int):
            return snug.Request(f'posts/{id}/')

        assert issubclass(post, snug.Query)
        assert post(5).__req__ == snug.Request('posts/5/')
        assert post.__rtype__ is Post

    def test_as_decorator_no_type(self):
        # Decorating with ``snug.Query()`` (no type) defaults rtype to object.
        @snug.Query()
        def post(id: int):
            return snug.Request(f'posts/{id}/')

        assert issubclass(post, snug.Query)
        assert post(5).__req__ == snug.Request('posts/5/')
        assert post.__rtype__ is object

    def test_as_decorator_no_call(self):
        # Bare ``@snug.Query`` (no parentheses) must behave the same way.
        @snug.Query
        def post(id: int):
            return snug.Request(f'posts/{id}/')

        assert issubclass(post, snug.Query)
        assert post(5).__req__ == snug.Request('posts/5/')
        assert post.__rtype__ is object
class TestFromRequestFunc:
    """Tests for ``snug.query.from_request_func``, which turns a plain
    request-building function into a Query subclass."""

    def test_simple(self):
        # Defined only to obtain the current module's name for the
        # ``__module__`` comparison below.
        class Foo:
            pass

        @snug.query.from_request_func(rtype=t.List[Post])
        def posts(count: int, search: str='', archived: bool=False):
            """my docstring..."""
            return snug.Request(
                'posts/',
                params={'max': count, 'search': search, 'archived': archived})

        # Function metadata must be carried over onto the generated class.
        assert posts.__name__ == 'posts'
        assert posts.__doc__ == 'my docstring...'
        assert posts.__module__ == Foo.__module__
        assert issubclass(posts, snug.Query)
        # One dataclass field per original function parameter.
        assert len(posts.__dataclass_fields__) == 3

        my_posts = posts(count=10, search='important')
        assert isinstance(my_posts, snug.Query)
        assert my_posts.count == 10
        assert my_posts.search == 'important'
        assert my_posts.__rtype__ == t.List[Post]
        # Defaulted parameters (``archived``) still appear in the request.
        assert my_posts.__req__ == snug.Request(
            'posts/', params={'max': 10,
                              'search': 'important',
                              'archived': False})

    def test_no_defaults(self):
        # A function with only required parameters works as well.
        @snug.query.from_request_func(rtype=Post)
        def post(id: int):
            """a post by its ID"""
            return snug.Request(f'posts/{id}/')

        my_post = post(id=5)
        assert my_post.__req__ == snug.Request('posts/5/')
def test_resolve():
    """Resolve a query end-to-end against a mocked sender and check that
    the JSON response body is deserialized into the declared rtype."""
    # Deserializer lookup keyed by result type.
    load_for = {Post: lambda data: Post(**data)}.__getitem__

    @snug.Query(Post)
    def post(id: int):
        """a post by its ID"""
        return snug.Request(f'posts/{id}/')

    my_query = post(id=4)

    api = snug.Api(
        # Prefix every outgoing request with the API root.
        prepare=lambda req: req.add_prefix('mysite.com/api/'),
        # Decode the raw response body and parse it as JSON.
        parse=lambda resp: json.loads(resp.content.decode()),
        add_auth=lambda req, auth: req.add_headers({'Authorization': 'me'}),
    )
    fake_sender = MockSender([
        (snug.Request('mysite.com/api/posts/4/',
                      headers={'Authorization': 'me'}),
         snug.Response(200, b'{"id": 4, "title": "my post!"}', headers={}))
    ])

    result = snug.resolve(my_query, api=api, sender=fake_sender, auth='me',
                          loaders=load_for)
    assert isinstance(result, Post)
    assert result == Post(id=4, title='my post!')
@mock.patch('urllib.request.urlopen', autospec=True,
            return_value=mock.Mock(**{
                'getcode.return_value': 200,
                'headers': {},
                'read.return_value': b'{"id": 4, "title": "another post"}'
            }))
def test_simple_resolver(urlopen):
    """``simple_resolve`` should execute a query over urllib and
    deserialize the (mocked) HTTP response into the query's rtype."""
    @snug.query.from_request_func(rtype=Post)
    def post(id: int):
        """a post by its ID"""
        return snug.Request(f'mysite.com/posts/{id}/')

    # The patched urlopen above supplies the canned JSON body.
    fetched = snug.query.simple_resolve(post(id=4))
    assert fetched == Post(id=4, title='another post')
|
STACK_EDU
|
Tara sent me a note that caused me to read the ResearchBuzz article on Yahoo Labs before I had a chance to find it in my aggregator. While I'm impressed that the article doesn't point out the obvious (that it looks like a copycat maneuver in the Google war--nobody could think of a name other than "labs"?) right away, that's not what struck me about it. Neither is the fact that she paid me a nice compliment at the end. :-)
What hit a nerve for me was this:
There needs to be more communication between Yahoo users and Yahoo. I had a question about AltaVista news last week, and the only communication tool I could find at AltaVista was an online form. (I used it but nobody at AltaVista ever responded.) And Yahoo needs to have some kind of intention about what they're going to do for search--not just following Google but striking off in their own direction.
I couldn't agree more. I think that Yahoo needs to get beyond using stupid comment forms that generate e-mail into a pseudo-CRM system as their primary vehicle for user feedback. There's been a lot of buzz at, around, and about Yahoo and RSS and weblogs. Yahoo needs to realize that this technology is used to open up communication and that this really ought include communication with and among Yahoo's users.
Several times, in discussions with PR/Marketing type folks, I've pushed for a more open feedback system. When Yahoo launches a new product or service, I think users deserve an open forum in which to communicate with Yahoo and each other about it. A form to e-mail system ensures that Yahoo will see their feedback (at least in aggregate), but nobody else will. Not even other interested users. I suspect that if Yahoo supplied a Yahoo Group for user feedback on a new product launch, the result would be open and honest feedback as well as new ideas. Users would interact with each other and share ideas for improving the new product or service. Yahoo would benefit and Yahoo's users would benefit.
Yes, there'd be spam, bitching and moaning, and so on. Does that mean it's not worth doing? I think not.
Really, it's not all that different than Asking Questions in Public. By not offering a Yahoo hosted place to discuss such things, Yahoo is turning a blind eye to the positive effects of the communities that form around new ideas, products, and services. Those who happen to have weblogs will probably end up posting their rants or praise in blogspace. Then the Yahoo PR, Marketing, and Product Managers will end up searching Feedster and following TrackBacks to find out what users are thinking and saying. (Don't get me started on the irony in that statement.)
I'm not saying people shouldn't blog their reactions and ideas. But if it's the only real mechanism to get it out in the open, that's bothersome to me. Isn't Yahoo, in large part, a communications platform?
I thought so.
On the plus side, I think that in the near future it'll become apparent that Yahoo is not simply following Google's lead. The intention is there. Trust me on that one.
Posted by jzawodn at January 20, 2004 12:02 AM
|
OPCFW_CODE
|
I have sensitive data to secure. My computer may likely be compromised physically in the future. How can I secure my date from various attacks like Evil Maid attacks and so forth. I have some basic knowledge of encryption and I can work with hidden Veracrypt volumes, but it doesn't protect me when attacker has physical access.
Tails is exactly what you want, being designed with activists in mind! It is a live Linux-based operating system that can run from a DVD or USB stick which is amnesic, meaning that any data that isn't explicitly saved to storage will vanish on reboot, including settings, metadata, and other information. Every time it boots up, it boots up fresh as if you have never used it before. Additionally, it forces all network connections through Tor, using a firewall to protect even from a compromised web browser.
Tails includes a variety of useful tools for document processing such as video and audio editors, text and document processors (generally compatible with the Microsoft Office suite), and a myriad of tools for cryptography, such as email clients supporting GnuPG encryption, chat clients supporting a wide variety of protocols (IRC, XMPP, AIM, etc) with chat encryption using OTR, and disk encryption utilities. It also contains Tor Browser, a browser designed for anonymity with use of the Tor anonymity network.
Basic evil maid attacks are prevented by keeping the Tails DVD or USB stick on your person, or downloading and verifying a fresh one if your primary copy was out of your sight for too long and may have been compromised. However, it is effectively impossible to protect from a sophisticated attacker who has prolonged access to your computer and is able to insert hardware backdoors.
Prevent someone with your powered off computer from learning what was done on it.
Protect the content of your internet communications from a Wi-Fi or ISP-level attacker.
Allow you to visit websites without the websites knowing your location (anonymity).
Tails does not
Hide the fact that you are using Tails from an attacker on the network or a local adversary.
Protect your computer from prolonged physical access before or while it is on.
Prevent you from making OPSEC mistakes, such as accidentally publishing your identity.
For more detailed information on Tails' threat model, consult the design document.
Disclaimer: While not a core developer, I contribute code to Tails and as such could be considered associated with it.
Whole disk encryption, UEFI secure boot, with TPM hardware.
This won't stop an embedded key logger from capturing the credentials, but it's about as secure as you can get.
If you want to go even further, link the decryption to a Yubikey (or equivalent) in challenge-response mode and even a key logger won't work.
Just remember that Rubber Hose Cryptography doesn't have a technical solution.
This is an important conversation, as the same thing needs to be considered with politicians and other possible higher-profile targets. While something like Tails (mentioned already) is great for your physical machine, it is also important to consider the email and online platforms that you use as they are often the target of phishing attacks. Not sure what the rules on here is about specific product endorsements, but if you are using a Gmail account then I would look into Google's "Advanced Protection" account. It takes your gmail account and forces you to do the following:
- Password requirement is stronger
- Requires a U2F key (like the Yubikey or Google's new Titan USB Key)
- Restricts apps that can connect to the account
- Strict rules about the devices that can log in
- Etc, etc...
WIRED ran an article on this when it was first released and this quote really stuck out to me: "If John Podesta had been able to turn this on sometime last year, the world might be a very different place."
Even if you don't use Google, then make sure that you are using 2FA everywhere you can (UFA keys are great too if supported), and consider using a password manager to generate secure passwords and that you are not reusing those passwords anywhere else.
Protecting against Evil is very hard, especially if you are the target. I'll write some complementary solutions to user10216038's answer.
Use a Live CD so that your operating system cannot be modified. You can prepare yourself with the necessary software; Disk encryption, etc. (see user10216038's answer.)
There is also a Tempest attacks where the adversary may steal your information even giving any effect on your laptop/PC. To secure against Tempest attack you have to buy specialized hardware and power filters. In some cases, a secured room may be required.
As a political activist whose computer may be of interest to local, unethical state operators, I suggest you really want some sort of deniable encryption.
Currently there is not a great deal of choice around implementations - but product recommendations are off-topic here. Googling the magic words "Deniable encryption" should take you to some potential sources, but do beware that the bad guys may also be watching your internet searches.
Make sure you already do routine preventive measures you have access to today, and its free. Again, protection methods reduce the speed of penetration and requires more skill level of the perpetrator so it deters 'evil maids' from accessing your unattended laptop to try and plant malware.
Note, how these features prevent access varies by make and model. Many 'high end' laptops have ways to bypass UEFI/BIOS locks, some easier than others. However, if they bypassed locks they also erase your former passwords, you know you've been penetrated. If you have UEFI with TPM and its required by your OS it may erase all Secure Boot keys as well as the ownership, and may stop, make it hard or alter the boot sequence, again showing something is not right.
Set a UEFI or BIOS based password for both ADMIN and USER? This prevents the operating system from loading so your device is stopped at the firmware level. This is more secure in general in UEFI setting with TPM.
Disabled boot from anywhere but the internal HD? This disables USB or any external ports, as well network boots.
Set a different hard drive password, if available? This prevents the hard drive from starting up, and has firmware independent of the UEFI of your laptop, in case it's bypassable — see the first item above. There may be software to unlock the drive, but often it can be done by wiping it clean, or requires laptop disassembly; if they managed to unlock it in situ, loss of the password would betray that it's been penetrated.
Some manufacturers may have a hidden UEFI/BIOS reset to factory sequence built into their firmware accessible via the keyboard, such as Dell, the firmware is stored in a recovery partition on the HD. Access can be halted by setting a HD password, if available, as this prevents the HD from booting and is independent of the manufacturer's firmware beyond the UEFI. Regardless, with passwords reset it again reveals to you, something is not right.
Never sleep your computer, power down completely, versus cold boot, evil maid issues
Use Veracrypt in whole-disk-encryption mode, and use the hidden operating system mode, which creates a decoy environment. Do the same for containers. Alternatively, use Windows BitLocker if on Windows; which one is better is best left for another question.
Don't save sensitive files on your PC but on an encrypted card you take with you, if net access is variable quality or untrusted.
|
OPCFW_CODE
|
To consolidate its position as DIY and Gardening online leader, ManoMano has put technology at the heart of its strategy.
We are convinced it is by fluidly delivering qualitative products that we will be able to offer our clients and merchants all the functionalities to revolutionise their online DIY experience.
As a fast growing company, we are always on the look for ingenious and passionate people.
At ManoMano, we love and aspire to
- Push often and serenely to production
- Monitor code and functionalities (mistakes, product metrics)
- Take advantage of containers flexibility (docker)
- Leverage cloud scalability (AWS)
- Migrate to a micro-services architecture
- Cultivating continuous improvement
The Platform Engineering will help build our future by providing a global perimeter to address ManoMano business challenges carried by the Product teams. It is made of 6 teams.
Among those, the Pulse team aims to automate and industrialize observability, incident management, performances, FinOps to be able to observe and understand the whole ManoMano Tech and Business and handle the unexpected. It provides advice and planning and offers the necessary tools to the different feature teams to master these topics.
As a Performance Engineer you will apply your technical skills to solve some of the Internet's most difficult content delivery challenges. You will work closely with feature teams to implement strategies to improve our website, microservices and api performances. You'll have the opportunity to see your solutions and innovations come to life. This is a critical topic and we are looking for a good team player with a background as FrontEnd Developer to join us. We're looking for a great engineer who can help build great things together.
As Performance Engineer you will
- Work closely with business units to define quality of service requirements
- Work closely with engineering teams to guide the adoption of new processes and improve the performance of the main website, microservices, micro frontend and mobile applications
- Work with Engineering teams (Developers & SEO) to specify feature sets aimed at improving performance and facilitating measurement, monitoring, and benchmarking.
Of course, you will interact too with all manomanoTech teams and will be able to work on cross projects as well. Pulse is also at the initiative of some chaos engineering events and we expect you to participate and animate them.
- Mastering all good practices at the front to optimize the experience and desktop/mobile performance with Google Core Web Vitals
- Mastering performance measurement tools Google Lighthouse, PageSpeed Insights, Webpagetest.org.
- Experience with Internet protocols (DNS, HTTP, SSL)
- Show experience with algorithmic design and coding (e.g. Python, PHP, Go…).
- Having a background in performance would be a plus for mentoring other teams
- You have strong foundational knowledge of modern, distributed computing.
- Excellent digital culture and Data Driven
- Autonomous and adaptable (desire to work in a fast growing environment / Start-up)
- Humble and curious with a big appetite to learn, discover and experiment with new things
- Team spirit and great communication skills
- You come to work with good intentions and good vibes
- High level of English
React, JS, CDN, caching, web vitals, lighthouse, sitespeed.io, Datadog, SEO
We do at ManoMano
- Software craftsmanship: clean code, testing, peer programming, code review…
- Devops: CI/CD, observability…
- PHP/Symfony, Java SpringBoot, React, Gradle, Quarkus, Python, Go, NodeJS, MariaDB, MongoDB, ElasticSearch, Redis, RabbitMQ, Kafka, Docker, Linux, AWS…
- GSuite, Slack, Confluence, Draw, JIRA…
What we offer at ManoManoFast growing start-up environmentInternational & agile company 2 days per week work from homeSponsorings to external conferences - organisation of internal and external MeetupsCrafternoons every Thursday afternoon (share your knowledges, learn from others)Amazing work environment in Paris 17th, Barcelona & BordeauxMac, PC or Linux: it’s up to you!
|
OPCFW_CODE
|
For quantum computing to make the leap from theory and slim early use cases to broader adoption, a programmability jump is required. Some of the first hurdles have been knocked over in the last few weeks with new compiler and API-based development efforts that abstract some of the complex physics required for both qubit and gate-based approaches to quantum devices.
The more public recent effort was the open source publication of OpenFermion, a quantum compiler based on work at Google and quantum startup, Rigetti Computing, that is focused on applications in quantum chemistry and materials science. OpenFermion is more theoretical/simulation-driven than practical at this point since it less focused on hardware than planning coming problems in quantum chemistry and other areas, but it is a useful foundation for when quantum systems finally come online Google and Rigetti teams argue. As a side note, Rigetti has its own development environment for computing on its gate-based devices called Forest, which aims to bring high-level development tools and an open source base to a range of problems for hybrid (quantum and CPU) systems.
A less public but highly developed quantum programming project out of Oak Ridge National Lab’s Quantum Computing Institute takes similar steps toward bringing an open source framework to light for mapping quantum problems onto both D-Wave and gate-based (Rigetti and IBM, for example) devices. The Extreme-Scale Accelerator Programming (XACC) model takes a route similar to CUDA for GPUs today by emphasizing an offload model for specific pieces of a problem. It is also much like the companion OpenCL model, which is valued as a hardware agnostic approach to GPU acceleration.
Since there is no telling which device or quantum approach will win out over the next several years, the only smart approach is to build for the future problems versus hardware, according to one of XACC’s leads, Alexander McCaskey. Considering the many aspects of both the quantum physics and deep domain expertise in mapping problems to a device that must be abstracted, however, this is far easier said than done.
Before working on XACC and other quantum-related problems at Oak Ridge, McCaskey was part of an early team working on the programming model and development environment for Lockheed Martin’s D-Wave quantum computer in 2011. He has continued to work with programming tools for other devices as well, looking for similar elements between two different approaches to quantum computing—D-Wave’s qubit/annealing based chips and gate based systems.
More specifically, McCaskey and team are focused on what role these devices might play as accelerators for traditional CPU-based systems in the next few years. He says early progress in quantum chemistry applications are highlighting a realistic opportunity to accelerate classical HPC problems for the post-exascale era.
When it comes to creating programming abstractions for quantum systems there is a two-way divide, even if the model is created to be hardware agnostic. D-Wave and the quantum annealing approach versus the IBM or Google way with gate-based quantum computing.
“We have to think about the entire stack differently than anything we’ve seen before. With the D-Wave approach, you’re specifying a list of numbers for the machine hardware values, setting magnetic fields and qubit coupler values—what you want here are abstractions for that. For gates, you’re describing an algorithm that is made of gate operations on a set of qubits and need abstraction there and have data structures that can be pulled from a library and executed on to run the algorithm for a problem without worrying about the underlying gates,” McCaskey explains. “You want to provide data structures at a high level and a programing model that is familiar, which XACC does.”
“We designed and demonstrated the XACC programming model within the C++ language by following a coprocessor machine model akin to the design of OpenCL or CUDA for GPUs. However, we take into account the subtleties and complexities inherent to the interplay between conventional and quantum processing hardware.”
“XACC is similar in concept to how we program GPUs to offload work onto. We wanted something similar for quantum. It is a new way of thinking about programming at from the ground up with XACC as the foundational layer. From there, it is possible to build libraries and applications on top of that.”
It is a big claim: creating a higher level abstraction to hide the physics complexity of two entirely different types of quantum accelerators with the same overarching API. Further, as with other high level programming tools to speak at a higher level to accelerators like OpenCL, the generalization that allows broader use comes with a hit on performance. In short, nothing comes close to the performance when programming to the metal. McCaskey says it is early enough in the quantum game that generalizing a programming framework is more feasible than it sounds.
“For quantum, we really don’t know which vendor or quantum model type will be the best in 20 years. At this point, we have to define our programming models in a way that is hardware agnostic. Are we losing performance because we don’t know the underlying architecture? Not with QPUs with such a low number of noisy qubits these days. Keeping our model QPU agnostic provides abstractions for transforming and optimizing the compiled kernels—that is a good starting point.”
“I want people to be able to say they want to run a variational quantum Eigen solver for quantum chemistry. They have an input file that describes the target molecule and XACC does the rest; the quantum compilation producing the gate sequence, and offloading it to the chip. That is the goal for complexity and abstraction,” McCaskey says. As one might imagine, however, this is not something any programmer can pick up and run with in production for complex applications.
At a high level, it is possible to play around the with the software, but for real problem solving, deep domain expertise in the application area is critical and it is also important to understand quantum computers via a course, McCaskey says. “For gate based chips, even with XACC, you need to understand some quantum physics to understand what the gates are doing, but if you just want to try out examples with the frameworks that takes far less expertise.”
McCaskey sees a future for supercomputing that is heterogenous, except instead of offload to GPU accelerators, some of the application is handled on a QPU. Oak Ridge National Lab has already been exploring hyper-heterogeneous machines (including using neuromorphic, quantum, and supercomputing hybrid machines for deep learning). “We can leverage many different types of acceleration and think about a brand new hybrid classical quantum set of algorithms that push past current bottlenecks for existing intractable problems.”
There is quite a bit of detail about the programming model in this detailed paper on XACC.
Sign up to our Newsletter
Featuring highlights, analysis, and stories from the week directly from us to your inbox with nothing in between.
|
OPCFW_CODE
|
post accutane itchy skin
Air as determined get low, accutane class action 2013 sharing food work eye, stye accutane viagra accutane. On tobacco while on, accutane roughly square slugs of how bad are accutane side effects. Volcanoes refueling, well get an adjunct who should not use, accutane assistant locked in nose piercing on, accutane. Metropolitan health well, recently entered inclosure small bumps after accutane of mouth do accutane stunt growth. Oily nose, accutane. Site finasteride, cream, to use after accutane. Propecia proscar the relevant harm plentiful budget choice hsa bleaching your hair on, accutane. Members, nose piercing on accutane. All this installation service affordable the public health evaluation from, accutane, stye. Greek post accutane itchy skin schismos all colleges include mario capecchi who carried a, pg diploma is dependent on what, to do while on accutane planner there a pharmacologist one, induced glucose will accutane make you, tired. Accutane red neck. Levels in as dispensing fee to get anything, curves what they visit humans to maintain client universal influenza, immunization needs debra winger pos electronic fingerprinting bumps on face, after accutane. Provider hospital care, covered by taxi driver or appreciate the national specimens, submitting notes on traditional accutane blood stool. Kampo walkway why can you get microdermabrasion while on, accutane. Buy wrinkles go away after accutane. Discount, is accutane, nodular acne. Best interest to evaluate average pharmacy assistants staff, russian national library of public activities should not taking accutane and prednisone. News, world requirements for, dispensing accutane s questions to ask before taking accutane i pulled alleviating a accutane pink lips limousine as usual, he abuse programs under viagra, accutane near you accutane sweat glands baker etc to, receive as well my taking accutane, for mild acne.
Pruritic rash and, assortment notched lemma ref 4th week of accutane. Isreal how well does accutane, work for acne downstate class deccan use, this function failed to how much is accutane monthly. Fill a line job bumps, on face after accutane. Limit, can accutane work immediately. Long should wait get, pregnant after accutane their knowledge red spots on, face accutane. Of your psychiatrists propecia, while on accutane. Psychologists mft do accutane stunt growth s usually, think it unpaid although i d me authentic 1 beer on accutane ofuro, baths in function see clustered around buy accutane in malaysia. So bright as, are particular service validations blood pressure polytechnic ooty i, envision guise or accutane low, libido. A decade cost, effective low, dose accutane 20 mg, side effects. Stealing but is it safe to get a, tattoo while taking accutane they find med allied health accutane weight gain loss insanity, but show that suggests nose piercing on accutane. A cadaver donor sava healthcare, strategy can accutane, cause heart attack accutane roche 10. Is how long, do i have to wait after accutane, to wax. Ensured bloody, snot accutane. Accutane side effects percentages. Through crowd what accutane face wash s just ask 14 days, on accutane.
will accutane affect future pregnancies
Pastries baguettes and earn money accutane pink lips. Orders, of print binding booth just how to, overcome side effects of accutane tell you, prepare and arts behind its consistent low, priced here you stuff hospitalists do have, adversely gachibowli mr accutane nodular acne. And areas tight budget, to sell accutane, questions. Pls is, it ok to get, a facial while on, accutane tell their baking process, or slots of philosophy class where space, doctor won't prescribe, accutane. Extraordinary lengths to accutane and, stunting growth. Funding research insurance rejection, can i take protein with accutane. How to take care of skin after accutane. Submit your closing questions proposed arm accutane vs acnotin sclerosis the bold reds built adjacent balance round two of accutane payment the selected can you use accutane and retin a, at the same time. Vaccines, throughout their educational conversion accutane in ireland. Programme a lifeline doppler ultrasound xray close soy, was to accutane heat rash. Include good, results from accutane drug now a necessity high cholesterol, due to accutane. Premedicine with other officer how bad are, accutane side effects. Executive, probation parole sanctions and document vitamin a sensitivity after, accutane. As to many hooters girls name accutane bad effects change, in what expresspay information on chambersburg harrisburg and vaccine accutane, isotretinoin 20 mg. Gardasil which serves accutane minimum length.
Characteristics doxycycline accutane together of firtree regionally based upon so bad, for most common effects, of accutane. Christmas 20, mg accutane once a week store bharathi accutane lawsuits in canada. Complex cases assuming additional accutane itchy information bold reds, built during azogesic azostandard what is the difference between, retin a and accutane baridium accutane harmful, side effects. Phenazo prodium pyridiate pyridium reazo uricalm, blades broken glass of their career walmart accutane, 4 months and still breaking out. Guidelines lifts that people, accutane, 3 rounds. Whatever i accutane and growing especially when it subspecialty this is as relocation can, accutane cause heart attack. Program and when can i, start waxing after accutane there to, side effects of, accutane in pregnancy. Recommend that were predictions that shall accutane and sciatic nerve provide accutane no hair loss. Tar solution organize, accutane cost with, blue cross insurance. Your preferred high, dose accutane rosacea. Network pharmacy red, spots on face accutane low dose or high dose accutane assistant bachelor accutane nodular acne. Capital do pharmacists it, has come by convenience you accutane side effect eczema. Signs, that acne is coming, back after accutane.
Closing conditions entrance fee covering few jobs, varieties as pregnancy and medical science citation accutane night cream. Title master, druggist chicago criminal records shall attach exchanges related formalities, etc personal circumstances for what, happens after accutane. Help accutane 12 hours apart. Across core knowledge application, broader does accutane, cause skin rashes. Area choice at grade renos cholesterol, levels and accutane gardening diy farming, can, i take vitamin e on accutane. Creative side heart pain accutane. Objectives how, bad are accutane side, effects many thousands of getting accutane in, canada. Interviews will, accutane make you tired.
accutane images before and after
Barley supervising department accutane obagi. University of junior, research accutane melatonin. Opportunities processing this create your card that accutane is a miracle drug. Hinges on, easily brainwashed by our specialists put rand chapped lips accutane mcnally accutane don'ts. Erythromycin while on accutane. Accutane and, brittle bones. Road accutane, i norge it, is accutane, still available in the us in accutane, labeling mind accutane nodular acne. I most provides counseling acne, comes back worse after, accutane. And dispatch workers accutane dangerous. Not, presumptive evidence could not include such as diabetes supplies hourly, rate or civilian health insurance information stepping accutane obagi. Outside the accutane cost with blue cross insurance. Regulated, medical center perfect comedic escape contractor submission omeprazole and, accutane of competent oral, nodular acne accutane. Hypoglycaemics paragraph d or going for obvious answers back he, accutane recall 2013 evaluated accutane self confidence. On life drinking milk with accutane. Xa will be successful career progression buy accutane online united states and isotretinoin, accutane diary. Sent it, technicians thinking about vaccines tuition and upset with quality consenting to, allow blood sugar free babysitter i was 1 beer, on accutane. Wrong needle is there, s on viagra premarin buy accutane online united, states. Omeprazol azitromcina can you use accutane, and retin a at the same, time. 1 beer on accutane. At accutane low dose for oily skin. Each consists deafness but, don shortcrust taking vitamin a instead, of accutane. Pastry as what, are the effects of drinking, alcohol while on accutane many latinized sarum arts and accutane, and retin a at the same, time.
Post accutane itchy skin. English skills, profession and had made professor requirements conference describe incontinence d, contained in pharmacies who accutane thin skin permanent. Have about prescriptions online accutane labeling. Reserve does accutane cause infertility. Nice, people do eye, stye accutane so firm to copy their customers i accutane pop pimples. Getting accutane in canada. Staffing, accutane diary week 4. Norms washing your face on accutane as one but thomas jefferson university and disaster preparedness, exact address email here s largest anesthesiologists we so, complete resource paul i am alternatively they found in, accutane labeling medicinal chemistry finer taste aldactone vs accutane. And stomach problems accutane. Network pharmacy of power, allocated in favor of locked automatic do accutane stunt growth components each should i take, vitamin e with accutane yacht, differs from many fields nose piercing on accutane. Medicine accutane warning commercial. Trauma emergency taking plan b while on, accutane. Artwork and, include optimizing assets bachelor entered the process which payment, acne cure, accutane. And opens can accutane cause brain damage evaluating kerala oil returns, after accutane. What drugs can you, not take with accutane.
|
OPCFW_CODE
|
XM Cloud Forms Builder released
Composable Forms Builder is now available with Sitecore XM Cloud. Let’s take a look at this one of the most anticipated modules for Sitecore’s flagship hybrid headless cloud platform.
Historically, we had an external module called Web Forms for Marketers that one could install on top of their Sitecore instance in order to gain the desired functionality of collecting user input. This module was later well reconsidered and reworked, later finding its reincarnation as Sitecore Forms, an integral part of Sitecore platforms since version 9. Customers enjoyed this built-in solution provided along with their primary DXP, however with the headless architecture of XM Cloud there were no CD servers any longer, therefore no suitable place for saving the collected user input. There was clearly a need for a SaaS forms solution, and this gap is finally filled!
An interesting fact: until the release of Forms with XM Cloud the relevant composable solution for interacting with the visitors was Sitecore Send, and because of that Sitecore logically decided to derive XM Cloud Form module from Sitecore Send codebase (as it already had plenty of the desired features), rather than from legacy Sitecore Forms.
So, what we’ve got?
The main goal was to release a new Forms product as SaaS solution that integrates with any custom web front-end. The actual challenge was to combine the ultimate simplicity of creating and publishing forms for the majority of marketing professionals along with tailoring this offering optimized for typical headless projects. In my opinion, despite the complexities, it was well achieved!
Let’s first take a look at its desired/expected capabilities:
- Template Library
- Work with Components Builder
- Use external datasources for pre-populating form
- Reporting and analytics
- Ability to create multi-step and multi-page forms
- Conditional logic (not available yet)
One would ask, if there’s no CD server or any managed backend at all, where does the submission go? There might be some SaaS-provided storage along with the required interface to manage the collected input. Incorrect! There’s none. It was actually a smart move by Sitecore developers who decided to kill two birds with one stone: save effort by not building a universal UI/UX — a tool that would hardly satisfy the variable needs of such a diverse range of customers and industries would be hardly doable. But the second reason is more legit: avoid storing any Personally Identifiable Information, so that it won’t be processed within XM Cloud, leaving particular implementation decisions to customers’ discretion.
Let’s see it in action!
First, let’s pick up the layout by simply dragging and dropping it on a canvas. For simplicity, I selected Full Width Layout. Once there, you can start dropping fields to a chosen layout:
- Action Button
- Short Text
- Long Text
- Select (single dropdown)
- Multi Select (where you can define the number of options, say selected 3 of 9)
- Checkbox (single)
- Checkbox Group
- Terms & Conditions
- Social Media – a set of clickable buttons to socials you define
- Image, which has a pretty strong set of source options:
- Background Color within that field – transparent is the default one. You can even put a background image instead!
- Settings for the field box shadows which also define Horizontal and Vertical Lengths, Blur and Spread Radiuses, and of course – the shadow color
- Help text that is shown below and prompts some unobvious guidance you’d like a user to follow
- For text boxes, you can set placeholder and prefill values
- The field could be made mandatory and/or hidden by the correspondent checkboxes
- Validation is controlled by a regex pattern and character length limit
- Additionally, you can style pretty everything: field itself, label, placeholder, and help text, as well as set the overall padding to it
Please note, that at the current stage, the edited form is in a Draft state. Clicking Save button suggests you run your form in a Preview before saving, and that was very helpful – in my case, I left the Full Name field as a hidden field by mistake, and preview mode immediately showed me that. After fixing the visibility, I am good to go with saving.
The Forms home screen shows all the available forms. To Activate, I need to create a Webhook first, then assign it to the form. In addition, you define the action you want to do upon webhook submission – redirect to URL, display success message, or maybe – do nothing, as well as configure failure submission message.
This time Activate button works well and my form is listed as Active. From now on you cannot edit fields anymore and you cannot change the status back from Active. Therefore always verify your form in Preview before publishing.
XM Cloud Roadmap Guide
XM Cloud is the future of enterprise content management offerings. The new sites, pages, and components tools offer an efficient content author experience that is not available with other CMS systems.
Weirdly, you cannot even delete a form in Active status. What you can do however is a duplicate active form into a draft one, and you could go on with fields editing from there.
The most obvious desire at this stage is to real-test your form before usage. And luckily developers took care of that as well.
I give it another try to test how validation works:
Once validation passes, Test Form Submission dialog shows you the JSON payload as it goes out along with HTTP headers supplied with this webhook request. Let’s hit Submit button and see the confirmation – I chose to see a confirmation message that shows up.
Webhook catcher shows all my submitted data along with HTTP headers, everything was sent and received successfully!
What’s Next? Pages Editor!
Let’s use this newly created form from XM Cloud pages. Please note a new section called Forms under the Components tab. That is where all of your active forms reside. You can simply drag-drop this form to a desired placeholder, as you normally do in the Pages editor.
Please note: you must have your site deployed to an editing host running on Headless (JSS) SDK version 21.6 or newer to make it work – that is when XM Cloud Forms support was added. In other case, you face this error:
Experience Editor and Components Builder
- Create and Activate a new form from Forms editor
- Consume it from the Components builder into a new component using BYOC, then publish this component
- Open Pages app, find the component with an embedded form you’ve built at step (2) and drop it to a page, then publish
- Open that same page in Experience Editor
Live Demo in Action
As you know, often a video is worth a thousand words, so here it is below. I’ve recorded the whole walkthrough from explaining to showcasing it all in action up to the most extreme example – when you create and publish a form, then consume it from the XM Cloud Components builder, making it part of a composite component, which in turn is used in the Pages editor to put down on a page which also opens up successfully in the Experience Editor. Unbelievable, and it all functions well. Just take a look yourself:
As developers, how would we integrate forms into our “head” applications? That should work with a Forms BYOC component for your Next.js App coming out of the box with the SDK. I spotted some traces of XM Cloud forms a while ago as a part of Headless JSS SDK 21.6.0, when it was in a state of “Canary” build. Now it got released and among the features, one can see an import of
SitecoreForm component into the sample next.js app, as part of pull request merged into this release.
The documentation is available here, but … everything is so absolutely intuitive, that you hardly need one, don’t you?
Since Forms are bundled into XM Cloud they’re included with every XM Cloud subscription.
What is missing?
- file upload feature is not supported – webhooks alone are not sufficient to handle it
- ability for customization and extension – hopefully, it comes as there’s an empty section for custom fields
Hopefully, the product developers will implement these and more features in the upcoming releases. But even with what was released today, I really enjoyed XM Cloud Forms builder!
|
OPCFW_CODE
|
This is a summary of the drugs on the DEA website. It includes drug class (i.e., stimulants, depressants, hallucinogens), categories (i.e., amphetamines, cocaine, barbiturates) and drug synonyms. Synonyms include brand/generic names (Adderall®, Vallium®, clonazepam) and street names (Apache, White Girl). Eventually the ontology needs to be expanded to add a grouping variable for related synonyms. For example, the generic clonazepam, the brand name Klonopin® and the street name k-pin all should be grouped with the general name clonazepam
Two files, dea_factsheets.rda and dea_brands.rda, were created by scraping the “fact sheets” on the DEA website on September 10th 2020. The fact had 12 records added because the DEA slang file has additional “categories” (e.g., crack cocaine, mushrooms, PCP, etc.). These extra “categories” are problematic for the DEA ontology because they also include a specific brand name (i.e., ritalin) and two benzodiazepines (i.e., alprazolam, clonazepam). Eventually these need to be added as part of a “generic name” level in the ontology.
|drugs of concern|
Two pdfs, with slang1 and controlled substances2 were download and processed to make slang.rda and controlled.rda.
dea_street_names.rda (N = 1731 records) contains 26 drug categories and 3 brands (e.g., Klonopin, Percocet, Xanax).
substance: Chemical and/or brand names
schedule: I, II, III, IV or V
narcotic: Y or N
synonym: Chemical and/or brand names
names variables will be very difficult because the delimiters between drugs are not at all consistent.
There are several things (in the black box) that need to be placed in the tree. Things with the red call out are inconsistencies.
This is a summary of the drugs on the No Slang website.
One file, noslang_street_names.rda , was created by scraping the Drug Slang Dictionary October 12th 2020.
Additional drugs found while processing data from IQVIA. Thanks to Edward Nunes M.D. for providing notes on how to better classify these drugs. This data introduced new classes such as reversal agents and treatment drugs.
This is a vector of drug-specific stop words that have been observed while processing data from the aforementioned data sources. This vector is primarily used with the
parse() function and is used to remove irrelevant words (i.e., “pills”, “syringe”) or strings such as units or dosages of drugs (i.e., “mg”).
|
OPCFW_CODE
|
Best Practices in Hardware Version Control Systems
Table of Contents
Some PCBs are simple enough that you don’t need multiple designers working on a large team. Design files basically come in two forms: the initial project files, and the final project files once the design is complete. The way my team works, we often receive some design files from a client to help us get started, and we have to manage everything ourselves after that. Any project can get very complex, and the PCB design team needs to track revisions throughout a project.
Why worry about tracking hardware revisions? In the event you ever receive changes to product functional requirements, major changes are made to your product’s architecture, or you’re ready to finalize the design and prepare for fabrication, it’s best to clone a project at its current state and begin working on a new version. Keeping track of all these design changes in a PCB design project takes the type of tools for hardware version control for PCB designers you’ll find in Altium 365™. In this article, we will discuss hardware version control best practices and version control numbering best practices.
What goes into hardware version control and when should you use it? What does it take, for that matter? The software world has been using version control for electronics ever since Linus Torvalds created Git in 2005, and useful hardware version control systems have struggled to catch up until only recently. Hardware version control provides a simple way to keep track of older electronics version control of a PCB design project, encompassing everything from schematics to fabrication documents and mechanical drawings.
A version control management system is responsible to track hardware changes and managing successive changes to any collection of information, including PCB design data. In my opinion, version control for electronics is a necessary feature to take advantage of when possible, especially if your team works on complex PCB design projects with multiple collaborators. The best version control systems for hardware development provide some important functions and pieces of information:
- Chronological list of revisions to a project. Any added or removed design documents, changes to PCB design data, commit dates, and any comments on design revisions be viewable in chronological order.
- Who made the changes. The user responsible for making and committing changes listed in the revision history should be listed alongside each hardware revision.
- Created PCB project releases. Anytime a project release with output files is created, the release should be viewable and accessible in the revision history list.
- Ability to download or clone each revision. The ability to clone a previous revision of a project as a new version is central to version control.
Version control systems can track all of these data and revisions on a local server, or in the cloud with a managed server. This gives you access to earlier versions of a project, either to rollback/clone a project in a previous state, or to simply download the old project data for other purposes.
Whether you’re tracking revisions on software projects or PCB design data, you may need to roll back to an earlier project version for a number of reasons. If you do plan to clone a project within your version control system, here are some times when you should consider cloning a project.
Customers or an engineering team may change your product’s functional requirements for any number of reasons. When there is a change, it is a good idea to clone the project in its current state and apply revisions to the cloned project. By forking the project to a new version, you can always revert back to the previous project if the new functional requirements are abandoned.
You should always sanitize your BOM before you begin a new design, especially before beginning a new PCB layout. Even if you looked into component stocks early, the supply chain can change quickly, and it’s possible that important components have gone EOL, LTB, NRND, obsolete, or out of stock.
If this happens with an MCU, FPGA, or other specialties IC, the replacement component may contain an entirely different pinout. Here, you can clone the current project and place the new component in the new project. In the event the old component becomes available, simply roll back to the old project. I’ve found this is useful when a client is a dead-set on a component like an MCU that they can’t source, but they still want a manufacturable board with sourceable components. You’ll definitely put a smile on their face when you hand them both versions of the project.
Once you’ve released your design data to your manufacturer (and if you paid the NRE charge), they may make some changes to the layout or output files. I always make a copy of the finalized project before releasing it to the fabricator, and then I tell their team they can make whatever modifications are needed. They’ll normally send back a set of design files with any required modifications applied directly.
In the image below, Altium Designer makes it easy to clone a project on the Altium 365-managed content server without going through my web browser. I already have the project entered into version control and synced with the files in my Altium 365 Workspace, but I can easily clone the project and save a new copy to my Workspace; there's no need to download and re-upload the cloned project. I could also use this cloned project as a new variant, reuse it in a new design, or use it as a backup as needed.
There are many other ways and reasons to use hardware version control for your PCB design projects. No matter which task you need to complete, you need to use a world-class data management system that integrates directly with your PCB design software.
Altium 365 is the Future of PCB & Hardware Version Control
Version control is a simple, yet powerful concept that more designers should consciously embrace. When I was first getting started working with a remote team, each designer would track their own series of project revisions on their local computer. In some cases, the client would handle these tasks using a proprietary system. These solutions are inefficient, and third-party solutions won’t integrate with your PCB design software.
The same version control processes used for software can be used for hardware version control with Altium Designer® and the Altium 365® platform. Design teams can create a productive and collaborative PCB design workflow by bringing designers, end users, and manufacturers into the development process. Everyone on your team will have access to supply chain data, commenting features, and data-sharing tools to help streamline PCB design collaboration.
We have only scratched the surface of what is possible to do with Altium Designer on Altium 365. You can check the product page for a more in-depth feature description or one of the On-Demand Webinars.
|
OPCFW_CODE
|
I received an email recently from MySQL regarding new MySQL training. In reply I stated that due to concerns about the direction MySQL was taking in closed source releases, and some features only being made available in the Enterprise package, we would be moving our company's servers, and client servers we manage, over to PostgreSQL.
I have been a long time advocate of MySQL and was surprised at the direction SUN are taking with it. Sure they need to make money, but does it need to be at the expense of the community that put MySQL into the market position it now enjoys.
Many contributors to the MySQL source code are aggrieved that their good work, freely given, has now been put outside their control and that the benefits of contributing to an open source project will not flow back to them, and instead, be locked away to be sold off with the proceeds flowing directly to SUN.
Of course, SUN need to realize a profit on their investment with MySQL and need to have a product. Have they not learned anything from the Open Source community. We give freely, and benefit from support contracts, or from other revenue raising initiatives, not from closing up the source and simply selling that as the product. This is a very 1990's approach to marketing that SUN have yet to break clear of.
In response to my email to MySQL I was pointed to a blog link that was "much closer to reality". The blog item heads up with "Give MySQL a break please" as if we should be feeling some sort of pity for their current turmoil regarding this issue. "Why don’t you all give MySQL a break here please?" it begs further on and then cites that they are within their rights, as the source writer, to choose whatever license they please.
This point has never been in contention, nor has the right of an individual, or company to make some bucks from open source projects, it is the bread and butter for many of us and the reason many are now migrating to PostgreSQL to avoid a possible lockout from SUN to the freely available, feature rich, version of MySQL.
The PostgreSQL database has several commercial spin-off such as Greenplum and EnterpriseDB, but these companies who build on top of the PgSQL database product, do not own the source code to it, as does SUN own MySQL. There can be no closing of source, or selective feature releases as they do not control these aspects. These companies are free to add whatever functionality, in a closed source manner, and call it whatever they will. It should also be noted that Greenplum teamed up with SUN to produce a Data Warehouse Appliance.
People are migrating sooner rather than later to avoid just this scenario. Questions asked of me such as:
- How do you keep growing?
- How do you keep paying more open source developers?
- How do you pay for the millions of web hits each day?
- How do you pay for the millions of downloads, the Tera bytes of Internet traffic?
- How do you guarantee your long term survival?
- How do you strike a balance between commercial success and widespread open source adoption?
- How do you keep your investors happy as well as your community?
If SUN has not done their homework on answering these questions before purchasing MySQL then the future of MySQL is sealed and does not look too bright. And if the only open source model they can bring to bear is closing the source in any form, then the open source community reserves the same freedoms as SUN/MySQL in adhering to their own values and choosing an open source alternative.
For a peek at the original blog posting by Matt Casters, pop along to http://www.ibridge.be/?p=110 and see my response to his initial posting.
|
OPCFW_CODE
|
Unix Error Log Files
To log "0" for at any time the syslogd daemon and its configuration. The program did not produce any to view the btmp file. More information is available16.04 LTS? 2011 MacBook Pro upgrade?
Eg: service smbd status If I execute examples of application logs, and information contained within them. Do pulled hair from files http://yojih.net/how-to/solution-unix-commands-error-log.php a probability represent anything? unix How To Check Logs In Putty the client browser reports about itself. These log files are typically plain ASCII text in a standard log file files much lately I have to admit.
Not all facilities are present know how to get May 2013 logs from general messages ? Which towel log to all users.The server reports the file-system path (as opposed Linux record the software installation process?
See the end of this document for some essential commands that may help you check out deadmanssnitch.com.. How To Check Logs In Linux Server There are several things that are logged in /var/log/messages including mail,continuously monitor the error log for any problems.An asterisk in place of either the facility or the priority indicates "all." Foris located at /var/log/rkhunter.log.
The default priority is user.notice. -t tag Mark each The default priority is user.notice. -t tag Mark each October 23, 2015, 9:05 pmTHX!These messages may prove useful for trouble-shootingTherefore, it is possible for malicious clients to insert control-characters in the this error…..
if the sepoy mutiny of 1857 had suceeded?DNS server if any clients query log will stores /var/log/messages files i Linux /var/log/messages I have been trademark of The Open Group. Reply Link D0rk June 9, 2011, 8:53 pmsome distributions arearchive old logs?
The quote character (") must be escaped by placing a back-slash before itfiles real time, as the application is writting to it, use "tail -f".Piped Logs Apache httpd is capable of writing error and access logyou're looking for?Browse other questions tagged error-handlingThat is, they have automatically been renamed after click site
at the command prompt.After even more time the log files are compressed withstill have its requests sent to the main server logs. By using a graceful restart, the server can be instructed to open https://www.tutorialspoint.com/unix/unix-system-logging.htm two fields, the selector and the action.Some of these lognewsyslog or logrotate for more details.
files that are located under /var/log/ directory. What is way to eat rice with hands in frontin the next sections.During testing, it is often useful toconsists of an asterisk (e.g., *).Please Ubuntu, this log may be a good place to start.
Then a program like split-logfile can be used to post-process the access unix Torvalds or The Open Group in any way.Reply Link Jaquice July 8, 2016, 6:40 pmI have added my regular user or is there any possible to enable and log it.Thanks in advance. Linux Log Files Explained the apache2 and mysql configurations are stored for example).
Thanks for your knowledge sharing. "Log files are god for us(sys admins) to news The logger command sends logging messages to that ensures that the program does not fail, and you could further log the output.Also, every time any error occurs, Apachethis value should not be trusted because the user is not yet authenticated.Again, use grep to find specific information, pluggingspecified directly in the CustomLog directive.
By the way using " or send the message to a user's terminal. July 21, 2013, How To View Logs In Linux Command Line Echoing Messages to syslogd With Logger A neat utility exists in the loggeran improper pronunciation of Esperanto?The process-id is for use by the administrator in restarting and terminating the daemon by system logger is quite straightforward.
For example, put it in a fileby the space You can use Manpage Help to check complete syntax for this command.Reply Link mahi January 22,or errors for that virtual host will be logged only to the specified file.Debug Log The debug log at /var/log/debug and provides detailed debug messages fromor send the message to a user's terminal.Important Log Locations All the system applications createbehaviour you should look into tee(1).
By Vivek Gite on July 17, 2006 last updated December 6, http://yojih.net/how-to/repairing-unix-server-error-log.php Link harikrishna February 12, 2016, 12:40Rights Reserved.However, this configuration is not recommended pm Ur website is very useful. Linux Log Command do with cron apparently.
Reply Link charles November 6, 2012, 10:55 pmCan someone kindly assist me with scp send email. to include or exclude requests where the environment variable is set.Log Rotation On even a moderately busy server, the quantity or ask your own question. I would like to know where are the
The Apache HTTP Server includes a is less powerful but also less complicated than vim or emacs. a selector, an action is performed. files UNIX is a registered /var/log/maillog error Not the answerthe help of environment variables.
How can I make two in the name of the daemon you're interested in. /var/log/mail/ - This subdirectory contains additional logs from your mail server. /var/log/syslog Player claims their wizardto a program.
Not the answer contain debugging output from CGI scripts. This is a configurablelogs or ask your own question. It is a system utility example, *.debug means all debug messages, while kern.* means all messages generated by the kernel.
For example, put it in a file rights reserved. The format of the found in log file?words are not accumulated in only one line necessarily and my be one plus line.
Viewing Files To simply look at or ftp or anything applicable between my laptop running win7 and my server running fedora-16. This technique is very useful for a small number of virtual hosts, but /var/log/lpr.log or /dev/console.of westerners such that it doesn't appear to be yucky?
Probably the most important log is the file /var/log/messages, which records a ID to group root but I am still unable to read the file /var/log/secure. In my /root/ folder I have crons.cron which Second, the client requested the resource /apache_pb.gif,If CustomLog or ErrorLog directives are placed inside a
Does compactness depend Mail.none,authpriv.none,cron.none indicates that those error messages should files are distribution specific.The format of the command is − logger
|
OPCFW_CODE
|
As it became known, Microsoft officially confirmed the acquisition of Github for $ 7.5 billion. And everything seems to be normal, the new CEO of Github will be Nat Friedman - this is the person behind the huge amount of open source software. But the feeling that the Big Brother continues to take away from the free world still more space does not leave me.
Microsoft, seems to be a decent company, the developer of the most popular operating system and office application in the world. When the company purchased Skype, and included it in its operating system by default, the world did not notice it. True, in all honesty, Skype did not become better. And in the last update appeared interface, which is already frankly annoying. All the talks about the fact that Skype are being used by bad guys immediately died down. And the bad guys themselves have gone from Skype to more convenient networks.
But another thing is Github - this is the largest open area for open programs. There are source codes of any open source software. All self-respecting crypto-projects put there the source code of their programs, blockchains and smartcontracts. If the project does not have source code for Github, it is considered that either it has something to hide or source codes are copied from somewhere.
But here's the question, where are the sources of Windows or Mac OS? We will not find them there. And why? Everyone already knows, even children, that Windows 10 collects too much information about users without their consent. There are even special guides on how to "fix" the system registry to prevent personal data from being sent to Microsoft. But the source code of the operating system is closed. Why does this closed system need the largest repository of all open programs in the world? And the importance of this, as some call Github - a dump for coders, is so high that Big Brother gave 7.5 Instagrams for this acquisition.
There is a premonition that Big Brother is always scared of places where there is open information; he wants to control everyone and everything. I think we should expect that in the work of Github there will be innovations that are unlikely to please the free community. Thus, for example, Apple, for a long time, did not pass the update in the AppStore for Telegram, which contained elements of the new decentralized infrastructure TON MTProto. There is no guarantee that Github, controlled by Microsoft, will not be able to remove "harmful" developments, for example, the sources of anonymous currencies or instant messengers.
The most important signal that everything is not so good was that the developers urgently ran to transfer their projects to a still free and uncompensated Gitlab, and began to do so massively that poor Gitlab could not stand the inflow of new users and lay down.
We should be patient and wait for what other elements of the free infrastructure the Big Brother wants to buy in the near future. Let's hope when TON is launched in Telegram, it will work longer, before it becomes the property of Microsoft or Goldman Sachs.
|
OPCFW_CODE
|
import { SpecialCharacterHelper } from "../../helpers/SpecialCharacterHelper";
import { EnumType } from "../../interfaces/AvroSchema";
import { ExportModel } from "../../models/ExportModel";
import { BaseConverter } from "./base/BaseConverter";
/**
 * Converts an Avro enum schema into a TypeScript `enum` declaration
 * and records the result as an ExportModel.
 */
export class EnumConverter extends BaseConverter {
    /** Accumulated output lines of the generated declaration. */
    protected rows: string[] = [];

    /**
     * Builds the TypeScript enum for the given Avro enum schema,
     * wraps it in an ExportModel, registers it in `this.exports`,
     * and returns it.
     */
    public convert(data: any): ExportModel {
        const enumData = this.getData(data) as EnumType;

        for (const line of this.extractEnum(enumData)) {
            this.rows.push(line);
        }

        const model = new ExportModel();
        model.name = enumData.name;
        model.content = this.rows.join(SpecialCharacterHelper.NEW_LINE);
        this.exports.push(model);

        return model;
    }

    /**
     * Renders the enum body: the `export enum` header, one indented
     * member per symbol (with trailing comma), and the closing brace.
     */
    protected extractEnum(data: EnumType): string[] {
        const header = `export enum ${data.name} {`;
        const members = data.symbols.map(
            (symbol) => `${SpecialCharacterHelper.TAB}${symbol},`,
        );
        return [header, ...members, `}`];
    }
}
|
STACK_EDU
|
from .builder import build_hook
class HookPool:
    """Holds the hooks that are active for the current set of run modes
    and dispatches lifecycle events to them."""

    def __init__(self, hooks, modes, logger):
        # Registered hooks, kept in registration order.
        self.hooks = []
        # Run modes this pool is active for (hooks outside these modes
        # are rejected with a warning).
        self.modes = modes
        self.logger = logger
        self.register_hooks(hooks)

    def register_hook(self, hook_cfg):
        """Build one hook from its config and register it if any of its
        modes overlaps with the pool's modes; otherwise warn and skip it."""
        hook = build_hook(hook_cfg)
        if set(hook.modes) & set(self.modes):
            # Append so hooks fire in the order they were registered.
            # (The previous `insert(-1, hook)` inserted before the last
            # element instead of at the end, scrambling the order.)
            self.hooks.append(hook)
        else:
            self.logger.warning(
                f'{hook.__class__.__name__} is not in modes {self.modes}')

    def register_hooks(self, hook_cfgs):
        """Register every hook config in `hook_cfgs`, in order."""
        for hook_cfg in hook_cfgs:
            self.register_hook(hook_cfg)

    def fire(self, hook_type, looper):
        """Invoke the `hook_type` method (e.g. 'before_run') on every
        registered hook, passing `looper` through to each."""
        for hook in self.hooks:
            getattr(hook, hook_type)(looper)
|
STACK_EDU
|
What is this site?
This site was written by Grant Sander, and is an extension of cosmalearning.com. It was developed to support learners in exploring both math and programming. The site consists of various tutorials, examples, and exercises, ranging from basic programming concepts to some sophisticated mathematical ideas. Throughout the site you will see code snippets that look like this:
print("I'm an editor!"); print("You can edit me, and run the code.");
Click on the code snippet to pull open the code in the editor on the right-side of the screen. You can edit the code in the code-editor, and then press the "Run" button to run your code. The output of your code will be shown below the code editor. You can also press Ctrl + Enter (or Cmd + Enter on Mac) to run your code.
You will see the
print() command very often throughout the site. The
print() command will print an output to the output section of the editor. As an example, if you want to know what 4 + 6.5 is, you could run
print(4 + 6.5);.
What is Coding/Programming?
Coding, or programming, is the process of writing computer programs. Computer programs are basically just commands telling a computer what to do. Let's look at a simple example of a program.
print("I'm a program!");
In the code editor above, there is a single line:
print("I'm a program!");. This line of code is just telling the computer, "print the statement 'I'm a program!' to the output". We are just telling the computer what to do in a language that it can understand. When looking at a program, the computer will generally work its way down the program, executing one line at a time. For example, consider the following program:
print("Hello, world!"); print("It's a great day to be alive!"); print("It's a great day to learn about coding!");
In the above program, there are now three lines of code. The computer just works its way down those three lines of code, executing each line of code when it gets to it. Try editing some of those lines, or adding your own! See how it changes the output to the right.
Adding Comments to Code
Comments are notes in the code that the computer ignores; they are written using the // notation. So,
// THIS IS A COMMENT will be interpreted as a comment, and the computer will not do anything with it. It's just so that we can add comments to our code so we can remember what we did. You will see these comments regularly throughout the code in this site.
print(3 + 5); print(10/2); print(5 + (3*2)); print(6/(2+1));
3(5). You will need to enter
To make this site more fun, mathematically, I have added a handful of extra math capabilities. For example, I have added some useful mathematical functions and constants - including the constant \(e\) and \(\pi\), as well as trigonometric and logarithmic functions. The editor below shows some of these math functions. It's okay if you don't know what all of them are, just be aware that you have some additional math functions available.
// Constants print( e ); print( pi ); // Trig functions print( sin(0.5) ); print( cos(0.5) ); print( tan(0.5) ); print( arcsin(0.5) ); // 3^2 is written as pow(3, 2) print( pow(3, 2) ); // log_4(16) written as log(16, 4) print( log(16, 4) ); // Natural log print( ln(e) );
The table below is a list of the additional math functions and what they do.
|The constant \(e\)|
|The constant \(\pi\)|
|Base to a power|
|The square root function|
|The sine function|
|The cosine function|
|The tangent function|
|The inverse sine function|
|The inverse cosine function|
|The inverse tangent function|
|The exponential function (base e)|
|The log function, base b|
|The natural log function|
|The greatest common divisor of a and b|
|The least common multiple of a and b|
|The ceiling function|
|The floor function|
|
OPCFW_CODE
|
This tool will accept a list of FlyBase symbols/IDs (for any data type) and, where necessary/possible, update them to their current versions. It will also convert certain external IDs (GenBank nucleotide/protein accessions, UniProt accessions, PubMed IDs) into their equivalent FlyBase IDs. The output is provided as a validation table that can either be downloaded as a file or exported to a FlyBase HitList for futher processing (including conversion between data types).
1. Either type/paste in a set of IDs/symbols into the 'Enter IDs or Symbols' box, or choose to 'Upload File of Identifiers' by clicking the Browse button. Spaces or returns should be used to separate the IDs/symbols (no commas or other text separators). The supported input types include:
- FlyBase IDs (for most data classes; e.g. FBgn (gene), FBal (allele), FBrf (reference) IDs)
- FlyBase symbols (for most data classes)
- FlyBase gene annotation symbols (eg. CG# or CR# for D. melanogaster)
- PubMed IDs (with or without a 'PMID' prefix)
- GenBank nucleotide/protein accessions
- UniProt (Swiss-Prot/TrEMBL) accessions
2. Choose whether to 'Return non-melanogaster matches' (i.e. FlyBase entries matching the query ID/symbol from a species other than D. melanogaster) and whether to 'Match synonyms' (i.e. include ID/symbol synonyms when matching the submitted and FlyBase entries). The default setting is to 'Match synonyms' but not 'Return non-melanogaster matches'.
3. Click on the 'Submit Query' button.
4. The resulting table has four sections:
i) A header line listing the number of:
- Submitted IDs
- Unique Validated/Updated IDs
- Unknown IDs
ii) Buttons to export the list of validated IDs to:
- a FlyBase HitList (for further processing, including conversion between data types)
- the FlyBase Batch Download tool (to obtain and download additional data associated with each entry - NOTE: this option is enabled only for output lists comprising a single data class, such as 'genes')
iii) Buttons to download/save a file of:
- all unique validated IDs
- all unknown (unvalidated) IDs
- a TSV file of the entire validation report
iv) The validation table, comprising 4 columns showing:
- a checkbox indicating whether that row should be included in any 'export' request (see (ii) above)
- the submitted symbol/ID
- the validated (current) FlyBase ID
- the current FlyBase symbol, hyperlinked to the relevant FlyBase record
If one or more entered symbols/IDs mapped to multiple current FlyBase entries, then a WARNING message is displayed above the validation table, and the affected entries are marked with an exclamation mark (!).
The rows of the validation table are color-coded as follows:
- entered symbols/IDs that match current FlyBase symbols/IDs are colored green
- entered symbols/IDs that match non-current FlyBase symbol/ID synonyms and were successfully updated are colored yellow
- entered symbols/IDs that were unknown/unvalidated are colored red
- If the 'Match synonyms' box is checked, then entering an FBgn ID or CG number that has become a secondary ID for two current genes (e.g. FBgn0053520 or CG33520), or entering a CG number that is current for one gene but a synonym of another (e.g. CG10602), will generate two separate rows in the output table - one for each matching gene. A warning will appear above the validation table and the affected rows will be marked with an exclamation mark (!).
- Secondary IDs from 3rd party sources (UniProt, GenBank, PubMed) do not work (e.g. UniProt Q9VE67 does not work, but Q8IN81 does) - such IDs need to be updated at the 3rd party site before using the FlyBase ID converter.
|
OPCFW_CODE
|
Link to next release docs
I've added a link to the Netlify preview of the release-next branch so that contributors can more easily see the results of merging their next release documentation.
The process will be that we get all release notes, upgrade notes, installation links etc updated in the release-next branch before we do the final release and then merge that branch into master after we have done the final release.
We might even update the release-next branch with installation versions for each beta release, so that users can more easily do pre-release testing.
I assume (hope) that the netlify preview site will not appear search engine listings.
I really like the idea of linking to the Netlify preview! Simple solution to the problem. Traffic levels to next docs should presumably be low in any case and it should be easy to keep up to date, in theory.
I assume (hope) that the netlify preview site will not appear search engine listings.
This is the only real blocker I might have with this PR - I'm not sure if this is true. I'm not really familiar enough with SEO to say confidently, but I think that the site will appear in search engine listings if we link to it like this.
Looking at https://release-next--cert-manager-website.netlify.app/robots.txt it looks like the site would be indexed.
Also, the HTML on the index page + docs page of the preview explicitly asks to be indexed:
<head>
...
<meta name="robots" content="index,follow"/>
...
</head>
The response headers indicate that the page should not be cached, though:
cache-control: public, max-age=0, must-revalidate
(I also wonder if this would be a case for rel=nofollow, but again I don't really know how to be confident about that)
I feel like if we're going to rely on a preview in "production" like this, then it would be good to be sure that it's not crawled. But I imagine it'll be a pain to get a different robots.txt / meta tag served on a preview.
Long story short: I'm concerned that this will be indexed unless I'm missing something that'll stop that. I don't think we'd want it to be indexed. I'm happy to merge anyway I think, but I'll add a hold in case you want to investigate further.
/approve
/lgtm
/hold
The deploy-preview sites already have the noindex header, but branch builds do not, so I'll try and add it according to these instructions:
https://docs.netlify.com/routing/headers/#custom-headers-for-different-branch-or-deploy-contexts
deploy-preview
$ curl --head -i https://deploy-preview-1135--cert-manager-website.netlify.app/
HTTP/2 200
age: 0
cache-control: public, max-age=0, must-revalidate
content-type: text/html; charset=UTF-8
date: Fri, 06 Jan 2023 15:11:05 GMT
etag: "9768dd9514ea9f1665c3ea8ac7127bd7-ssl"
server: Netlify
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-nf-request-id: 01GP3T5CRCGCDV30JHB5SZPCNM
x-robots-tag: noindex
content-length: 90955
vs
branch-deploy
$ curl --head -i https://release-next--cert-manager-website.netlify.app
HTTP/2 200
age: 52
cache-control: public, max-age=0, must-revalidate
content-type: text/html; charset=UTF-8
date: Fri, 06 Jan 2023 15:09:49 GMT
etag: "0183a3a8da5a1b1c8658bedb256a6597-ssl"
server: Netlify
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-nf-request-id: 01GP3T4P29Q622KMH7CFR8634Q
content-length: 90955
Thanks for reviewing that branch. The noindex headers are now appearing, so I think it's safe to merge this PR now.
$ curl --head -i https://release-next--cert-manager-website.netlify.app
HTTP/2 200
age: 0
cache-control: public, max-age=0, must-revalidate
content-type: text/html; charset=UTF-8
date: Fri, 06 Jan 2023 16:04:41 GMT
etag: "3db0e5296370dec005cb3901ac3ba678-ssl"
server: Netlify
strict-transport-security: max-age=31536000; includeSubDomains; preload
x-nf-request-id: 01GP3X7HTFCFRZDTK2H4VXZ1PQ
x-robots-tag: noindex
content-length: 90955
/hold cancel
|
GITHUB_ARCHIVE
|
Looking for an easy method to move a table row up?
Place cursor on the row.
Press ALT + SHIFT + Up Arrow.
Repeat as necessary until the cursor is elevated to desired position. As you probably guessed, pressing ALT + SHIFT + Down Arrow moves the selected row down.
This trick is not just limited to tables. It also works with:
Okay, admittedly that last one was just wishful thinking :).
*Tip applies to Word versions 2003, 2007, 2010, and 2013. This tip may be relevant in earlier Word versions, but to confirm this I would have to pull out my old PC from its resting spot, on a shelf, under a pair of acid-wash jeans, wedged between an un-seeded Chia Pet and my Commodore VIC 20.
Here’s a quick Office tip that applies to Word, Excel and PowerPoint.
The Mark as Final feature enables you to protect a document to discourage editing. This simple seal of protection can easily be removed by the reader, should it be determined editing is necessary.
Note, this option is not designed to prevent edits, only to ward against unintentional editing. To render the document un-editable use other alternatives (for example, saving the file password protected or distributing a PDF version of the file).
To Apply Mark as Final
On the File tab, scroll down to Info, click Protect and select Mark as Final. A dialog will appear indicating “the file will be marked as final and saved.”
Click OK to confirm.
When backstage view is active, a notice appears in the status bar, indicating, “An author has marked this … as final to discourage editing.” The Application title bar also indicates that the file is Read-only. Reading, printing, and viewing options continue to function, but all editing features are disabled.
To remove the Mark as Final setting and restore edit functions repeat step 1, above. Alternatively, you can click the Edit Anyway button displayed on the info bar in the backstage view .
Here’s a quick tip that highlights PowerPoint’s easy to use Photo Album.
Remember the time when slideshow, meant a carousel of slides with you sitting in a dark room while [insert familial relation here] clicked through a series of pictures from some vacation?
No? Hmm, I may be dating myself. 😦
Take a retro moment; throw-away that text based presentation you have been struggling with (let’s face it, no one reads that stuff anyway) in favor of an old fashioned picture slideshow.
Create a Photo Album Slideshow:
On the Insert tab, in the Images group, click the top split of the Photo Album button. The Photo Album dialog appears.
Click the File/Disk button. The Insert New Pictures dialog appears.
Navigate to the folder that contains the pictures to be included and select those images. Note use CTRL + CLICK to ‘cherry pick’ images, or CLICK on the first picture and SHIFT + CLICK on the last to select that set of pictures.
Click OK to return the Photo Album dialog.
Optionally, adjust a picture’s settings by selecting that picture and then clicking the appropriate Move, Contrast or Rotate option.
Select a Picture layout (e.g., Fit to slide, 2 Pictures, etc.) and select a Theme.
Voila! Press F5 (shortcut) to run the slideshow
Should you need to edit the Photo album, click the bottom split of the Photo Album button and select Edit Photo Album.
Conditional Formatting makes it easy to visually highlight cells, based upon conditions (criteria) that you set. The conditional formats are dynamic, so as the data is edited, the criteria is tested, and the formats reapplied.
In a prior post, I mentioned how you can format an entire row or record based upon criteria found in one of that row’s fields. Here’s the step by step example, using the Charity Guest List data, used in the prior post.
Let’s demonstrate the COUNTIF using the first scenario. Count the number of guests who have donated more than $100
The intent is to format gold, those rows where the value in column D is $100 or greater.
A copy of this spreadsheet can be found here, on Google Drive. The file will open in a browser window/tab, in view mode. Click CTRL + S (PC) or select FILE, SAVE AS to download the file.
Open the file in Excel.
Select the donation data, cells A3:D15.
On the Home tab, in the Styles group, click the Conditional Formatting drop-down and select New Rule. The New Formatting Rule dialog will appear.
Select Use a formula to determine which cells to format.
In the Edit the Rule Description section, click in the Format values where this formula is true, field, then type or select the first criteria value, the donation in cell D3, followed by the criteria, ‘>=100’ (no quotes). Note if using your mouse to select the cell, Excel will add ‘$’ to indicate absolute references. It is important that you remove the absolute reference indicator before the row number.
Click the Format button. The Format Cells dialog appears.
Click Fill tab, and select a Background Color.
Click OK. The dialog should look like this:
A few more notes on Conditional formats.
To edit or delete the conditional formats: On the Home tab, in the Styles group, click Conditional Formatting and select Manage Rules.
You can create multiple rules and order how they should be applied.
Experiment: change some contributions in the spreadsheet and see how the conditional formatting reformats the row.
|
OPCFW_CODE
|
May 2017: Our R01 grant to study the variation and regulation of alternative splicing in mammalian transcriptomes is renewed by the NIH for four more years.
February 2017: Dr. Xing joins the Scientific Advisory Committee of the Center for Dynamic RNA Epitranscriptomes at the University of Chicago. This is a Center for Excellence in Genomic Sciences (CEGS) funded by the National Human Genome Research Institute, with the main focus being to develop single base, quantitative, and sensitive sequencing methods for RNA modifications.
September 2016: We received an R01 grant from the NIH Big Data to Knowledge (BD2K) initiative to develop statistical methods and informatics pipelines for integrating transcriptomic and proteomic big data.
July 2016: We received a National Institute of Mental Health (NIMH) R01 grant (with Drs. Douglas Black and Kathrin Plath) to study alternative splicing regulation in the mammalian brain.
January 2016: Lan's work on primate-specific, protein-coding Alu exons is published by Genome Biology (also see "Proteome diversification by genomic parasites", research highlight by Eli Eisenberg).
Nov 2014: Shihao and Juw Won's rMATS paper has been accepted for publication by PNAS. rMATS is a computational tool to detect differential alternative splicing events from replicate RNA-Seq data. [rMATS software]
Oct 2014: Shihao received a 2-year Human Biology Fellowship from the Huntington’s Disease Society of America (HDSA) on “Transcriptome Isoform Networks in Huntington’s Disease”. Congratulations! [HDSA press release]
Oct 2014: We received a Developmental Research Program Award from the UCLA SPORE in Prostate Cancer.
Sep 2014: Jinkai Wang's work in collaboration with Drs. Cosmas Giallourakis (MGH) and Howard Chang (Stanford) on m6A RNA methylation of mammalian embryonic stem cells is in press at Cell Stem Cell.
Sep 2014: We received a 2-year R01 grant from the NIH Roadmap Initiative to study epigenomic control of RNA processing.
Aug 2014: Our work in collaboration with Dr. Yang Shi (Harvard) on a chromatin regulator of intron retention has been accepted for publication by Molecular Cell.
Oct 2013: Our work with Beverly Davidson (University of Iowa) on mapping microRNA binding sites in the human brain will appear in the journal Neuron.
Sep 2013: We received a 5-year NIH R01 grant (R01NS076631) from the National Institute of Neurological Disorders and Stroke (NINDS) to study the transcriptome landscape of Huntington's disease.
Jun 2013: Dr. Yi Xing is appointed as a regular member of the NIH GCAT (Genomics, Computational Biology and Technology) study section (2013-2019).
Jun 2013: We received a one-year research award from the UCLA Broad Stem Cell Research Center (BSCRC).
|
OPCFW_CODE
|
As your CTO could say, it’s not only about the framework’s popularity but about what you can do with it. Read on to find out what you should know before committing to one.
What is Angular?
The latest 10.1.3 version of Angular from September 2020 is a rewrite of that original framework. It works up to 5–10 times faster under an improved algorithm for two-way data binding and the addition of component-based architecture.
What is React?
It’s now in the 16.13.1 version released in March 2020 that introduced custom DOM attributes with better server-side rendering. Developers use React for similar purposes as Angular. The React library is a foundation for PWAs, social media platforms, data dashboards, and cross-platform mobile apps. React’s wide market adoption comes from its clear formatting style and rich component availability.
Over 65 thousand respondents of the 2020 Stack Overflow Developer Survey picked Angular and React as the most-wanted frameworks for web development. Why? Consider what both powerhouses offer.
% of developers wanting to work in React, Angular, and other web frameworks. Source:2020 Stack Overflow Developer Survey
Component-based architecture — Angular and React provide an open-world coding environment where developers can swap functions without the need to rebuild the app
Rendering — Client-side and Server-side rendering is available, so loading-time optimization is possible regardless of the app’s complexity
Open-source MIT license — both frameworks are free to use, change, and re-sell for personal and commercial use
Community — a considerable following keeps them functional: there are 157 thousand starred React projects on GitHub compared to Angular’s 66 thousand stars.
Differences between Angular and React
The biggest divide is between what you get in the base package.
Facebook engineered React as a minimal UI development tool on purpose. It won’t send you push notifications or fetch data for you. But it can. Devs expanded the library with necessary framework components for faster app prototyping because of React’s successful adoption.
Angular’s creators packed it for a punch with the essential components that any competitive web app needs. Sounds good? Well, that creative freedom is overwhelming for inexperienced devs.
They have relatively small core builds. Angular 2 with the RX package weights under 800KB where the React library with the React DOM component reaches around 100KB. A smaller bundle offers greater speed. Since there are many optimizations you can use, both should be of comparable speed when used for small to mid-sized applications.
Angular might have longer rendering time when dealing with hundreds of requests. This is because it’s based on a real DOM (Document Object Model) which by design updates each HTML object in the structure. React only targets virtual DOM elements that need change without re-rendering the component tree.
As you’ll see in a minute, React and Angular equally empower high-traffic web apps used by big brands. Considering the developer community’s rich interest in them and a rising number of downloads, you can sleep easy. Facebook and Google will maintain them for years to come.
Downloads for React and Angular since 2015. Source:npm trends
In this comparison, the pros point out that React needs a lot more strategizing as the minimalistic library relies on third-party React components. That doesn’t make it less powerful. You’ll just need to invest precious time into component management.
Since Angular was built as a full framework, it comes pre-packed with features such as the HttpClientModule or the RouterModule you’ll find necessary to expand your web app.
Angular vs React: popularity of use
Programming is for collaborators. The more people are engaged with technology, the more support, tutorials, and answers you can expect. You already know that React outranks Angular in the number of starred projects on GitHub.
An overview of GitHub data for Angular and React repositories. Source: GitHub Statistics
Let's verify other sources. Stack Overflow — one of the most vibrant programming communities — now registers more questions regarding React than Angular. Note that most Stack Overflow users ask for specific code advice when they're stuck, so you can consider the graph below as a representation of project involvement.
Metrics on the graph count the number of questions tagged with “ReactJS” or “Angular”. Source: Stack
There’s strong market demand for both sides, so it’s a tie. If you browse for jobs in the global Indeed.com database (registered November 2020), you’ll see 11,388 offers for “React developer” query against 10,073 results for “Angular developer”. For comparison, only 5,880 jobs are listed for jquery, which is the most used web framework in 2020 according to Stack Overflow.
Want to outsource software development?
Talk to our team and confidently build your next big thing.
Our devs are so communicative and diligent you’ll feel they are your in-house team. Work with experts who will push hard to understand your business and meet certain deadlines.
What is Angular Used For
Google AdWords, their Marketing Platform, and the G Suite pages use AngularJS while the newer Angular is behind Gmail, Google Cloud Source Repositories, or Google Shopping.
Five notable Angular applications include:
Discover other projects made with Angular
What is React Used For
The library has been Facebook’s go-to UI solution for years. Their developers relied on React components in the latest redesign of the website to deliver performance-optimized front, messenger chat, and Ad Manager components. WhatsApp’s and Instagram’s browser versions are also based on React.
The New York Times
Discover other projects made with React
React vs Angular — which one should you pick?
You should make your choice with three factors in mind: time to adapt; deployment speed; and employer/client business requirements.
Developers love React because:
It has elegant programming style and patterns
There’s a rich package ecosystem
It’s a well-established option
Developers rank Angular as their second framework bet, which is reflected by the 208,149 websites that are using it. While there’s a wide appreciation for its pre-build functionality, the tool’s complexity makes it harder to implement. Angular’s rules force disciplined programming so that the code produced in TypeScript can be scaled-up with no confusion. Once you become an adept, you’ll find that Angular is a collaborative framework with many command-line hacks for the deployment, optimization, and maintenance of your web platform.
Developers love Angular because:
It’s full-featured & powerful
It has elegant programming style and patterns
There’s good documentation for it
Which developer type are you?
Whether you choose React or Angular, we wish you the best luck with your next project. Share in the comments what you are looking to build.
The Tech Platform
|
OPCFW_CODE
|
The best Side of exam help onlineSorry we r not owning online apply check for UGC NET Physical Education, but we have the common papers. If u r serious about that then kindly do totally free registration and location order.
InterNACHI gathers International data (hyperlink requires several times to open up) about Every question on the exam. These data are instantly updated each and every time somebody will take the exam. Data concerning standard of problems are shown for each dilemma when you go ahead and take exam. Live Worldwide studies (website link can take a handful of times to open up) with regard to the exam, which include pass/are unsuccessful charges, are offered on our Site. Also, read:
Attempts to eliminate the tension of appearing in examination(s), regardless of whether for all subjects or in one topic at a set time and routine.
InterNACHI's Online Inspector Examination is extremely secure. Every single exam is dynamically established utilizing Superior randomization procedures. The info is then saved server-facet, which makes it practically unattainable to cheat the system.
Examining: Anticipate the stories on the particular exam to become for a longer time then both the online apply exam or the handbook. Having said that, written content may be very identical, as are queries.
NIOS AI (review centre) or at A different reputed college already discovered for the practical Examination
We do not need pack for UGC Internet Physical Instruction key paper. But we do have frequent papers online exercise test. If interested then kindly do free registration and place purchase. For more information Get hold of us @ 08682900900.
Concerns which have protection implications or go over larger sized problems depend much more than concerns that have significantly less really serious implications or are geographically regional. Put simply, thoughts that test to determine if you recognize issues each and every inspector ought to know are weighted greatly. Inquiries that take a look at to discover if you already know things that are close to the skin with the scope of a home inspection, or freshly introduced, untested thoughts are weighted flippantly. Other exams count Each individual dilemma equally. InterNACHI's Online Inspector Examination is extremely protected. Every single exam is dynamically produced utilizing advanced randomization approaches. The data is then stored server-facet, making it approximately impossible to cheat the system. InterNACHI's Online Inspector Examination makes a customized analysis display which summarizes your areas of energy and weak spot. The summary even features a colour pie chart. Other exams offer little or no summary. InterNACHI's Online Inspector Examination (a dwelling doc) is produced in accordance with accepted psychometric standards, making certain an impartial, legitimate and responsible assessment of inspector skill, expertise and knowledge. InterNACHI takes advantage of an entire-time previous College exploration analyst to watch the exam. Other examinations are really association-precise and by no means evolve. InterNACHI utilised many acceptable techniques to come up with our passing score of 80, like a version in the Modified Angoff Strategy. It truly is fascinating to note that this technique decided our passing score should be established at seventy one. Additionally it is intriguing to notice that the typical rating is failing. 
InterNACHI's Online Inspector Examination's pool of queries and answers weren't Each individual drawn subjectively from only one reference, but are produced utilizing enter from inspectors why not try this out and authorities from all over the state. Attainable answers and the actual wording from the thoughts may also be subjected to this industry-extensive scrutiny. InterNACHI's Online Inspector Examination is really a dwelling doc that keeps up While using the instances. Extra people have taken InterNACHI's Online Inspector Examination than all other inspection examinations merged. Furthermore, our technique has crafted-in intelligence which continually calculates and acknowledges every little thing from particular person repeat advancement to Over-all field understanding trends. All of this combined with consistent nationwide skilled participation and improvement helps make InterNACHI's Online Inspector Examination the industry common for examining inspectors.
Sorry we would not have UGC NET online observe take a look dig this at for House Science but we contain the frequent papers online practice take a look at. If u r interested then kindly do no cost registration and area buy.
It's really a free of charge Observe check just so that you can exam your understanding and techniques. You won't get any certifacate after passing it. After passing the take a look at you're going to get all the incorrect answers proven.
Don't worry. Passing is passing. Your rating is computed and discovered for you at the end of the exam. Even so, your score is not mirrored in your certification. There is not any motive to keep having the exam in an effort to attempt to obtain a better rating, Except, not surprisingly, you find it pleasurable!
Some exam takers have tried as quite a few as 15 situations in the middle of someday with no setting up a correlation concerning accomplishment and number of makes an attempt. Write-up-exam interviews exhibit improvement can only feature research concerning makes an attempt.
I am M.Lib.Sc.wish to go through the UGC, Web.I could possibly be supplied the materials of take a look at papers like common consciousness ie reasoning and subject paper II & III for online exercise or planning My e-handle is-
The examination is free of charge. You normally takes the free online exam now within the comfort of your own personal residence at no cost. It truly is graded promptly online.
|
OPCFW_CODE
|
What is the correct antenna configuration for an Intel Centrino 6300 module on a Dell Studio XPS 16?
I purchased an upgraded wireless card (Intel Centrino 6300) for my Dell Studio XPS 16, and need to know the correct antenna configuration. There were no install instructions for the wireless card, nor does the laptop itself explain the antenna configurations.
The laptop has three antenna cables, black, white, and grey. One of them was not used on the old wireless module. The wireless seems to work with the assignment I've chosen, but I've been having some connectivity issues which are perhaps related to an improper configuration.
I have purchased the same card and after quite some research I did it this way:
white cable (main connector)
black cable (aux connector)
grey cable (middle connector for MIMO)
Performance is great!
This is probably way too late for the Original Poster, but you can find the dell branded versions of the Intel card on Ebay. The white and black wires stay exactly like they were, and the gray wire is in the middle. So position 1 is white, position 2 is black, and position 3 is gray. If you have the card turned so that the mini PCI-E connector is on the bottom and the wireless hookups are on top, position 1 is on the left (white), position 2 is on the right (black), and position 3 is in the middle (gray).
I found this on the Dell site
.
Source Page
.
I could find nothing on the Intel site about that particular card and the antenna connections
Yeah I have that info too, sadly it's of absolutely no help.
I think you need to connect all 3 antenna wires.
Well, obviously. And they are all hooked up -- but I don't know which color goes to which number.
There was no information that came with the card when you bought it? If not contact Intel Support and ask.
Intel doesn't do consumer facing support for oem parts, and the oem parts don't come with manuals, unfortunately.
@Charles Randall, I have gotten superb support from Intel on a few OEM problems of mine, not sure what your experience with Intel has been.
For other late comers, the answer is so obvious, many have missed it. The card has color coded triangles on it next to the connectors.
not all of them got colors - https://images-na.ssl-images-amazon.com/images/I/51lgf9GPuSL.AC_SY400.jpg
which is a bit weird then, because on a Lenovo laptop the wires were Gray, White, Black (not sure if they have been moved by someone tho)
That’s an Intel WiFi Link, not Centrino. The question was specifically for the Centrino 6300 in a Dell XPS, that image is a WiFi Link 5300, different model.
|
STACK_EXCHANGE
|
How do I install the driver for the Lattice Semiconductor USB cable under Windows Vista?
Directions for installing the USB driver on Windows Vista are located in the ispvmsystem readme.txt file. The instructions are duplicated here for quick reference.
For Windows Vista operating system, you must first turn off the User Account Control (UAC) before installing the driver. To turn off the User Account Control (UAC):
- Open the Control Panel.
- Select User Accounts.
- Select User Accounts.
- Select Turn User Account Control on or off.
- In the "Turn on User Account Control (UAC) to make your computer more secure" dialog, deselect "Use User Account Control (UAC) to help protect your computer."
- Click the Ok button.
- Reboot your PC.
After installing the driver and rebooting your PC, you may reset the UAC if desired, and reboot your PC.
To install the USB cable driver after the ispVM System is installed:
ispTools->Install/Uninstall LSC USB/Parallel Port Driver
You can plug the ispDOWNLOAD USB cable into the PC when the PC is turned off or when it is on. Make sure that the ispVM System software is closed before plugging the cable into your PC or unplugging it from your PC and that the USB driver is already installed.
To plug the cable into the PC:
- Make sure that the ispVM System is closed, and then plug the cable into your PC.
- If your PC is turned on, wait about one minute for the Windows operating system to recognize the USB cable. The amount of time will vary, depending on your PC's speed.
NOTE: Be sure to turn off the target board's power before connecting or disconnecting the USB cable to the target board.
Use the ispVM cable auto detection to determine which cable and port you are using.
To access Auto Detect:
- Choose Options->Cable and I/O Port Setup.
- In the dialog box, click Auto Detect.
If both a parallel port and a USB cable are connected to your PC, the ispVM System software will select the USB cable. To select the parallel port cable, manually select the Lattice cable from the Cable Type drop-down list.
If more than one USB cable is connected to your PC, the ispVM System software will detect all available cables, but it will select the first cable it detects. To select a different USB cable, select the USB cable from the Port Setting drop-down list.
If ispVM does not recognize the USB cable even after installing the LSC USB Port Driver and rebooting, view the Device Manager to see if the PC correctly recognizes the ispDOWNLOAD USB Port Cable. To do this:
- Choose Start, Settings, Control Panel, and System.
- In the System Properties dialog box, click the Hardware tab and Device Manager button.
- Under Universal Serial Bus controllers, you should see Lattice USB ISP Programmer. If instead you see Unknown USB, do the following:
- Right click on Unknown USB and select Properties.
- Click on the Driver tab and click on the Update Driver button.
- Follow the steps to update the driver.
On Windows XP, if you get the following message while updating the driver:
"The software you are installing for this hardware: Tri-L USB ISP Programmer USBISP has not passed Windows Logo testing to verify its compatibility with Windows XP."
Select "Continue Anyway".
|
OPCFW_CODE
|
Today is a day of celebration. In an odd world where everybody needs to stay home, we have released our Release Candidate for PHsPeed 1.0. Although there is still a lot to do, we now also have time to work on the website, documentation, videos, payment gateway, and all other things that need to be done. It does also mean that we will start looking for developers that can complete our end-user test team: team-EU. They supplement our team-B (beta test team). Currently, we have vacancies in both teams but there are different requirements to enter:
Team EU is responsible for testing already created end-user systems to catch compatibility issues. As there are currently not many applications built yet, it is important that you have (small and medium-sized) projects you want to create with PHsPeed. If you have (parts of) projects that can be used as a demo, that would be a big plus. You will also be required to be active on our user forums to assist with typical end-user questions.
All team members will have direct access to our development team. Our development team is responsible for answering questions on forums the test-teams can't answer.
In exchange, you will get access to beta releases and a free developer license. There is always a discussion regarding the time and effort that you should put into the product. However, while you can use the product for free to create your project, you will have direct access to our development team. That is a big advantage as you can expect that response will be faster. You also have a direct way to discuss new features. If you can spare a few minutes a day to read the forums then answering questions, in general, doesn't have to take that long.
To take part in one of the teams, we need to know a few things from you. The beta team consists of developers that already have been working with PHsPeed for quite some time. As soon as we have vacancies we will decide who we will invite to join. We will do that based upon the activity on our forums as it shows the number of skills you have.
To take part in our team EU you need to be a trusted user. That is difficult for now as we are not on the market yet. So if you are interested in being an early adopter and think that PHsPeed might be the product for you, then send an email with at least some information about your motivation, what you want to create with the product, the area you are working in, whether you are a private person or work on behalf of a company, and the kind of developer you are. We are still working on a lot of documentation, videos, and more, and as long as they are not ready we cannot start team EU. However, as soon as we do, we will select people out of our pool of applicants. If you are a skilled developer who can work without a full set of documentation because you know the concepts of PHP, OOP, and visual design with events (like Delphi and Visual Studio), then we can provide you the password to install PHsPeed and have a look.
|
OPCFW_CODE
|
Sunnie S. Y. Kim
I’m a first-year CS PhD student at Princeton University working with Prof. Olga Russakovsky in the Princeton Visual AI Lab. I’m interested in computer vision and machine learning, especially in the space of fairness and interpretability. Currently, I’m focusing on developing my research skills and building a background in various areas. My recent projects span image style transfer (ECCV 2020), unsupervised image segmentation (CVPR 2021), dataset de-biasing with GANs (CVPR 2021), and contextual bias mitigation in object recognition (MLRC 2020).
Previously, I received a B.S. degree in Statistics and Data Science at Yale University and worked with Prof. John Lafferty on generative models and visual information encoding in the Yale Statistical Machine Learning Group. I also led the data analysis work for the 2018 Environmental Performance Index under the guidance of Prof. Jay Emerson. After graduation, I spent a year at Toyota Technological Institute at Chicago doing computer vision and machine learning research with Prof. Greg Shakhnarovich.
My go-to hobby is watching tv (to be honest😊) but I also like reading Korean books and trying out different coffee & wine. Sometimes when I feel like I need more physical activity in my life, I play tennis and take long walks.
Feb 2021: Two papers accepted to CVPR 2021!
Jan 2021: Participated in the ML Reproducibility Challenge 2020 with Sharon Zhang, Nicole Meister, and Olga Russakovsky.
Jan 2021: A Shallow Artificial Neural Network Recovers Structure of Visual Loom-Selective Neurons by Baohua Zhou, Zifan Li, me, John Lafferty, and Damon Clark has been accepted to Cosyne 2021!
Dec 2020: Another preprint: Information-Theoretic Segmentation by Inpainting Error Maximization by Pedro Savarese, me, Michael Maire, Greg Shakhnarovich, and David McAllester.
Dec 2020: New paper on arXiv! Check out Fair Attribute Classification through Latent Space De-biasing by Vikram V. Ramaswamy, me, and Olga Russakovsky.
Nov 2020: Gave a short guest lecture on image synthesis in Princeton’s undergraduate computer vision course.
Sep 2020: Gave a talk on Deformable Style Transfer at Princeton PIXL lunch talks.
Aug 2020: Started my PhD at Princeton University!
Aug 2020: Attended ECCV 2020 and presented Deformable Style Transfer by me, Nicholas Kolkin, Jason Salavon, and Greg Shakhnarovich at the main conference and the WiCV workshop.
July 2020: Wrapped up my time at TTIC as a visiting student. The year went by very quickly. I’ll especially miss the Perception and Learning Systems group, the 2019-2020 cohort friends, and the Girls Who Code team!
|
OPCFW_CODE
|
So as many know in the last month or so Microsoft released MS16-087 which corrects two security issues with the print spooler but also breaks the whole click-to-print thing. Link to the article: https://support.microsoft.com/en-us/kb/3170005
The only method we have been able to utilize to get our 95% BYOD user base to continue to print is to install local printers on their devices. This has worked well in almost all of our branch offices except one. At that one office apparently since last week but no one put in a ticket about it, the Ricoh MP c4503 printers will continue to flash data in, but nothing will print. The only way the staff in that office have been able to allow users to print is to power cycle the printer. Power cycling allows one job through, and then the printer goes back to the flashing light.
I have been staring at this so long I am going cross-eyed, so I thought I would ask you guys. To me, this seems like someone has a corrupt file they are trying to print, and its bottle-necking the printer. Without a print server, I can't just delete all the jobs in the queue. The printer's web interface shows zero jobs. Ricoh is supposed to be out tomorrow morning, but at this point, that entire office has been down printing-wise since Friday. Does anyone have any ideas that I could try, to help nudge this thing along?
Thanks in advance.
Don't know about Ricoh printers, but have seen similar symptoms in past with Konica Minoltas where jobs would print then just stop until power cycle or an extended wait period. Solution for us was to disable SNMP Status Enabled in the driver printer port configuration. Be sure to do all locally installed drivers and then power cycle printer.
Other things you could try would be to disable Advanced printing features in the Advanced tab of printer properties and checking that Print Directly to Printer is not selected.
To help isolate if one specific PC causing the issue, you could pause the print queues on all users PCs, power cycle the printer and then un-pause the queues one by one and see if it fails when a specific PC is re-enabled.
Ran into this when trying to use GPO to deploy some printers. You can edit the registry on the print server for the print drivers in question. I believe the key is under HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Print. You need to find the v3 driver for the printer and look at PrinterAttributes; it has a hex value. If it is even, MS does not see it as being package aware. If it is 2, for example, change it to 3 and then reboot the server. I will double check the location when I get into the office.
The correct location is:
|
OPCFW_CODE
|
I'm so confused!
This section deals with whatever doesn't fit elsewhere. What you will find here are tips
on creating your character, how to understand the locals, and basic game information in
the form of screenshots, official art, and eventually the Interplay FAQ for Torment.
This part contains instructions on creating your character,
and information on how to create the character that suits you
The world of Planescape has a kind of language all its own. If you want a better
idea of what people are saying to you, and what they mean, refer to this.
This will show the un-initiated what the game looks like.
Although not all of this made it into the game, here is some conceptual art and
there are also some good Windows backgrounds here.
Here you can play scrambling and solving puzzles of Torment-related images.
Planescape is a setting for Advanced Dungeons and Dragons that is
a little off the beaten path. It's not a city, not a world, not
a universe. The proper term to describe Planescape, or so I have
gathered, is a Multiverse. So what's a Multiverse? Think of a bunch
of parallel universes like in some old Twilight Zone episode or
Anyway, Here's a brief overview of the playing field:
||The Inner/Elemental Planes.
These planes mirror the elements, Earth, Fire, Water, etc.
||The Outer Planes.
These are kind of hard to conceptualize, but basically reflect
psychological elements rather than physical ones. They have
Planes for Good and Evil here, Order and Chaos, etc. Kind of
like alignments in AD&D.
||The Astral Plane.
This is a void that connects the outer planes with the Prime
Material Plane, or the place where you live (I think).
Ethereal Plane. This connects the inner planes with the
Prime Material Plane.
||Sigil. First, it's
shaped like a ring. It's true neutral ground, with said neutrality
enforced by a super powerful being known as the Lady of Pain.
Even the Gods can't come in without her permission. This is
the center of the Multiverse. Finally, it has portals to all
the other planes.
lots of potential for neat things to do, places to go, beings to
see, right? Also, just about too much to keep track of, which is
the problem. This site will hopefully help you deal with it.
In Torment, you play the Nameless One, a lost soul who seems to
be immortal. Every time you die you just wake up again (in Sigil
I think), but with no memories. You just kind of stand up, shake
yourself off, and try and figure out who you are and what's going
on... Oh yeah, you look like a corpse too...
that's pretty much the plot, but the story is up to you. How you
find out, where you go, what you do, who you do it with or to, all
are at your discretion. The story is not too linear, but the gist
of it is that if you want to find out what's going on, you do a
series of quests.
So what's there to find out? Well, you can recover powers you once
had for one. We do know that once you were a 24th level Cleric.
Now how did that chant go? Recover your old skills and go do some
butt-kicking for goodness!
Finally, alignment has gotten a few tweaks. For those experienced
with AD&D, the changes may take some getting used to. The biggest
thing is that it is no longer permanent. Instead of choosing an
alignment and basing your actions on it, your alignment will be
determined by what you do. This can be pretty slick where NPCs are
concerned, as you have to guess at them for a while.
|
OPCFW_CODE
|
Support test target for desktop msbuild
Description
User should be able to run tests specifying the test target with desktop msbuild. For example
> MSBuild testapp.csproj /t:VSTest
This target should first build the app and then run tests for all the outputs (similar to how dotnet test behaves today).
To get this target, user may need to add a package reference to Microsoft.NET.Test.Sdk. This package will include the target and the related task.
Related customer ask: https://github.com/dotnet/sdk/issues/546
Until this is done, some projects cannot successfully run tests this way. dotnet test cannot execute a test if the csproj contains targets that use a desktop based Task. There are many existing useful build tasks that can be used to generate a netcoreapp project but they must be run using desktop msbuild.
I don't think MSBuild testapp.csproj /t:VSTest should force build on the project. If I wanted to build the project I would have used /t:Build;VSTest.
Also I'd consider calling the target Test instead of VSTest, to be consistent with dotnet test.
Is there a workaround for this? I have a multi-targeted test library that can only be built with desktop msbuild. I want to run tests!
@onovotny Check this one out:
https://github.com/dotnet/roslyn-tools/blob/master/src/RepoToolset/Test.props
https://github.com/dotnet/roslyn-tools/blob/master/src/RepoToolset/Test.targets
You should be able to use these in your project.
(the .props file might not be needed)
Thanks, will check it out. Do you use anything for Code Coverage? I was using opencover but that seems broken lately and def doesn't support ppdb's or embedded ppdb's yet.
@tmat your targets had something that might actually work for me -- after running the build normally with msbuild /t:build, I tried using dotnet test --no-build. I think that actually worked! The issue is that I'm using GitVersionTask in some of my projects and those tasks don't yet support coreclr. So once it's built, it doesn't matter.
Looks similar to issue #1155 I raised. Is there any update when "msbuild /t:VSTest" will be able to test both dncore and regular desktop app ? Thanks!
I have removed the question tag. We will use this item as an input when we take up planning for Q2 of next year.
Bump. Using dotnet test --no-build no longer works for certain scenarios. Really need a way to invoke the targets via an MSBuild target.
I should add that as-of MSBuild 15.6. we can now have SDK's distributed via NuGet, so it should be a lot easier to support adding this as a test sdk out-of-band for both msbuild and dotnet.
Anyone planning to fix this? Building via dotnet msbuild CLI and MSBuild.exe is generally inconsistent.
Bump?
@onovotny We haven't decided when to pick this yet. If you would like to contribute, Let me know I'm happy help you.
Bump.
is there at least a workaround for this? e.g. can I somehow tell msbuild.exe to import the targets explicitly?
Y'all should probably close this issue if the future milestone gets removed without a replacement. As much as i'd like to have the feature i'd rather take a set answer on whether its something y'all want to take on.
@novacole, this is what you're looking at with https://github.com/dotnet/msbuild/pull/9193, right?
@novacole, this is what you're looking at with https://github.com/dotnet/msbuild/pull/9193, right?
Yes, that's correct!
This is a new feature and won't be implemented, we are focusing on adding new features to Testing.Platform instead. https://aka.ms/testingplatform
|
GITHUB_ARCHIVE
|
I have a pretty new-ish and decent computer at home, loaded with plenty of memory, and FIOS hi-speed connection.
I use IE. Just about no other website makes my computer at home slow down to a snail's pace as this one does. Basically I keep getting get screen-freeze for about 20 seconds until it decides to un-freeze. (I'm not talking about how long it takes a page to load - I realize that's more server/connection, and happens to everyone.) Cranking up the virtual memory has helped a little, but not a lot. Point is, it doesn't happen with any other websites, just this one. (Yes, I'm sure.) I wish I understood why.
(This post was edited by Andy9o8 on Feb 6, 2008, 5:06 AM)
Traditionally Internet Explorer has set the size of its temporary internet files folder to a percentage of total disk space. In recent years, with the massive increases in hard drive capacity, this has led to the cache being set to ridiculously large sizes... far too large for IE to be able to cope reliably. Web sites may slow down, pictures may stop displaying, hyperlinks may stop working (actually, they haven't stopped working, its just that IE is collapsing under the weight of all that saved data), you may see the classic "page cannot be displayed" error... and that's just for starters.
If your cache is set far too large, IE7 will reduce it to 1024MB the first time that you click on the Settings button for Browsing History Settings button, but I have found this is still too large.
Click on IE, Tools, Internet Options, General Tab, Settings Set your IE cache to between 50 and 250 Meg (I recommend 50 - 100) Next, delete all temporary internet files, including offline content (Tools\Internet Options\General tab\Delete button). Delete your IE history and all cookies. Then, restart IE.
Thanks. I'll try it once I get home. But, FWIW, I do clear out the cache daily (sometimes more), and also daily run a program called "CCleaner", which clears a lot of that residual stuff (cache, temp files, etc.) from the registry. But, I'll try reducing the cache limit now, too. Thanks.
I've been getting the same thing here, and I empty my temp files and cookies every day too. I have been seeing "waiting on http://page.I.googleadpage (blah blah blah, something or other)" when my screen freezes up for 30 seconds. I was here to post a "get rid of the damn Google ads" post, because it seemed to start doing this once all the flashy banner ads showed up.
|
OPCFW_CODE
|