npm i @lambdatest/node-tunnel
var lambdaTunnel = require('@lambdatest/node-tunnel');
// Creates an instance of Tunnel
var tunnelInstance = new lambdaTunnel();
// Replace <lambdatest-user> with your user and <lambdatest-accesskey> with your key.
var tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>'
};
// Callback Style
// Starts the Tunnel instance with the required arguments
tunnelInstance.start(tunnelArguments, function(error, status) {
if (!error) {
console.log('Tunnel is Running Successfully');
}
});
// Promise Style
tunnelInstance
.start(tunnelArguments)
.then(status => {
console.log('Tunnel is Running Successfully');
})
.catch(error => {
console.log(error);
});
// Async/Await Style
(async function() {
try {
const istunnelStarted = await tunnelInstance.start(tunnelArguments);
console.log('Tunnel is Running Successfully');
} catch (error) {
console.log(error);
}
})();
Start tunnel Instance.
tunnelArguments: Credentials for the secure tunnel connection.
- user: The username for the TestMu AI account.
- key: The accessKey for the TestMu AI account.
callback(function(error, status)): A callback to invoke when the API call is complete.
// Callback Style
tunnelInstance.start(tunnelArguments, function(error, status) {
if (!error) {
console.log('Tunnel is Running Successfully');
}
});
// Promise Style
tunnelInstance
.start(tunnelArguments)
.then(status => {
console.log('Tunnel is Running Successfully');
})
.catch(error => {
console.log(error);
});
// Async/Await Style
(async function() {
try {
const istunnelStarted = await tunnelInstance.start(tunnelArguments);
console.log('Tunnel is Running Successfully');
} catch (error) {
console.log(error);
}
})();
Get Status of tunnel Instance.
// Callback Style
tunnelInstance.start(tunnelArguments, function(error, status) {
if (!error) {
console.log('Tunnel is Running Successfully');
var tunnelRunningStatus = tunnelInstance.isRunning();
console.log('Tunnel is Running ? ' + tunnelRunningStatus);
}
});
// Promise Style
tunnelInstance
.start(tunnelArguments)
.then(status => {
console.log('Tunnel is Running Successfully');
const tunnelRunningStatus = tunnelInstance.isRunning();
console.log('Tunnel is Running ? ' + tunnelRunningStatus);
})
.catch(error => {
console.log(error);
});
// Async/Await Style
(async function() {
try {
const istunnelStarted = await tunnelInstance.start(tunnelArguments);
console.log('Tunnel is Running Successfully');
const tunnelRunningStatus = tunnelInstance.isRunning();
console.log('Tunnel is Running ? ' + tunnelRunningStatus);
} catch (error) {
console.log(error);
}
})();
Get name of the Running tunnel Instance.
callback(function(tunnelName)): A callback to invoke when the API call is complete.
// Callback Style
tunnelInstance.start(tunnelArguments, function(error, status) {
if (!error) {
console.log('Tunnel is Running Successfully');
tunnelInstance.getTunnelName(function(tunnelName) {
console.log('Tunnel Name : ' + tunnelName);
});
}
});
// Promise Style
tunnelInstance
.start(tunnelArguments)
.then(status => {
console.log('Tunnel is Running Successfully');
tunnelInstance.getTunnelName().then(tunnelName => {
console.log('Tunnel Name : ' + tunnelName);
});
})
.catch(error => {
console.log(error);
});
// Async/Await Style
(async function() {
try {
const istunnelStarted = await tunnelInstance.start(tunnelArguments);
console.log('Tunnel is Running Successfully');
const tunnelName = await tunnelInstance.getTunnelName();
console.log('Tunnel Name : ' + tunnelName);
} catch (error) {
console.log(error);
}
})();
Stop the Running tunnel Instance.
callback(function(error, status)): A callback to invoke when the API call is complete.
// Callback Style
tunnelInstance.start(tunnelArguments, function(error, status) {
if (!error) {
console.log('Tunnel is Running Successfully');
tunnelInstance.stop(function(error, status) {
console.log('Tunnel is Stopped ? ' + status);
});
}
});
// Promise Style
tunnelInstance
.start(tunnelArguments)
.then(status => {
console.log('Tunnel is Running Successfully');
tunnelInstance.stop().then(status => {
console.log('Tunnel is Stopped ? ' + status);
});
})
.catch(error => {
console.log(error);
});
// Async/Await Style
(async function() {
try {
const istunnelStarted = await tunnelInstance.start(tunnelArguments);
console.log('Tunnel is Running Successfully');
const status = await tunnelInstance.stop();
console.log('Tunnel is Stopped ? ' + status);
} catch (error) {
console.log(error);
}
})();
Every modifier except user and key is optional. Visit TestMu AI tunnel modifiers for the complete list of modifiers. Below are demonstrations of some modifiers for your reference.
The below credentials will be used to perform basic authentication of your TestMu AI account.
- user (Username of your TestMu AI account)
- key (Access Key of your TestMu AI account)
If you wish to connect the tunnel on a specific port:
- port: (optional) Local port on which to connect the tunnel.
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
port: '<port>'
};
If you wish to perform tunnel testing using a proxy:
- proxyhost: Hostname/IP of the proxy; this is a mandatory value.
- proxyport: Port for the proxy; defaults to 3128 if proxyhost is used.
For basic authentication, we use the below proxy options:
- proxyuser: Username for connecting to the proxy; mandatory when using proxypass.
- proxypass: Password for the proxyuser option.
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
proxyHost: '127.0.0.1',
proxyPort: '8000',
proxyUser: 'user',
proxyPass: 'password'
};
Human-readable tunnel identifier:
- tunnelName: (Name of the tunnel)
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
tunnelName: '<your-tunnel-name>'
};
Populate the path of the local folder you want to test on your internal server as the value of the below modifier.
- dir/localdir/localdirectory: Path of the local folder you want to test.
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
dir: '<path of the local folder you want to test>'
};
To log every request to stdout:
- v/verbose: true or false
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
v: true
};
Logfile: You can provide a specific path for the log file. If you don't provide a path, the logs will be saved in your present working directory with the filename tunnel.log. To provide a specific path, use the below argument:
- logFile: path
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
logFile: '/lambdatest/logs.txt'
};
Other supported modifiers include the following (a combined example follows this list):
- egressOnly: Uses proxy settings only for outbound requests.
- ingressOnly: Uses proxy settings only for inbound requests.
- dns: Comma-separated list of DNS servers.
- sshConnType: Specify the type of SSH connection (over_22, over_443, over_ws).
- mode: Specifies in which mode the tunnel should run [ssh, ws].
- nows: Force the tunnel to run in non-WebSocket mode.
- mitm: MITM mode, used for testing websites with private certificates.
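As a rough sketch, several of these modifiers can be combined in a single tunnelArguments object. The specific values below (DNS servers, SSH connection type, tunnel name) are illustrative assumptions only, not recommended settings:
tunnelArguments = {
user: process.env.LT_USERNAME || '<lambdatest-user>',
key: process.env.LT_ACCESS_KEY || '<lambdatest-accesskey>',
tunnelName: '<your-tunnel-name>',
// Illustrative values only; replace with settings that suit your network.
dns: '8.8.8.8,8.8.4.4',
sshConnType: 'over_443',
verbose: true
};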
Our GitHub Issue Tracker will help you log bug reports.
Tips for submitting an issue: Make sure you don't submit two issues with the same information; add unique information to every issue you submit. If an issue already exists, you can instead add a "+1" in its comments.
Always provide the steps to reproduce when you submit a bug. Include the details of the environment where you encountered the issue, i.e. browser name, browser version, operating system, screen resolution, and so on. Describe the situation that led to the bug, and describe the expected output and the actual output precisely.
We don't want to pull the brakes in case you want to customize your TestMu AI experience. Before you submit a pull request, keep the following in mind: stick to the coding conventions; if you add tests, make sure they all pass; and clean up your Git history before submitting the pull request, for example by using an interactive rebase to squash minor changes and fixes into the corresponding commits.
TestMu AI (formerly LambdaTest) is a full-stack, agentic AI quality engineering platform that helps teams test smarter and ship faster. Built AI-native from the ground up, it provides end-to-end AI agents that can plan, author, execute, and analyze software testing across the entire development lifecycle.
Designed for scale, TestMu AI enables seamless testing of web, mobile, and enterprise applications on real devices, real browsers, and customizable real-world environments—empowering teams to deliver high-quality releases with speed and confidence.
It's a perfect solution for bringing your Selenium automation testing to cloud-based infrastructure that not only helps you increase your test coverage across multiple desktop and mobile browsers, but also cuts down your test execution time by running tests in parallel.