Ramses Scripts Upload
This commit is contained in:
97
local/path/home/Backdoor.js
Normal file
97
local/path/home/Backdoor.js
Normal file
@@ -0,0 +1,97 @@
|
||||
/**
 * Walks every route from "home" and installs backdoors on the faction servers
 * (or on every eligible server when args[0] is truthy).
 * Rebuilds ServerRouteList.txt first via scanRecursiveWrapper.
 * @param {NS} ns
 * @param {string[]} allServers - unused; kept for caller compatibility.
 */
export async function main(ns, allServers) {
    ns.tail();
    // Refresh the route file so paths reflect the current network.
    await scanRecursiveWrapper(ns);
    let currentHackingLevel = ns.getHackingLevel();
    let serverRoutes = JSON.parse(ns.read("ServerRouteList.txt"));
    let allPaths = getPaths(serverRoutes);
    let checkAll = ns.args[0];
    for (const entry of allPaths) {
        // Hop along the path one server at a time; singularity.connect only
        // reaches servers adjacent to (or backdoored from) the current one.
        for (const name of entry) {
            if (ns.singularity.connect(name) === false) {
                // Bugfix: report the hop that actually failed. Previously this
                // printed `currentHop`, a variable that was never assigned and
                // was always the empty string.
                ns.tprint("Error when trying to connect to: " + name);
                return;
            }
            // Faction servers always get a backdoor; everything else only when checkAll is set.
            if (ns.getServer(name).hostname === "CSEC" || ns.getServer(name).hostname === "avmnite-02h" || ns.getServer(name).hostname === "I.I.I.I" || ns.getServer(name).hostname === "run4theh111z" || ns.getServer(name).hostname === "The-Cave" || checkAll === true) {
                if (!ns.getServer(name).backdoorInstalled) {
                    // Need both enough skill and root before installBackdoor can succeed.
                    if (ns.getServerRequiredHackingLevel(name) < currentHackingLevel && ns.hasRootAccess(name) === true) {
                        ns.print("Trying to backdoor " + name);
                        await ns.singularity.installBackdoor(name);
                        ns.print("Success on " + name);
                    }
                } else { continue }
            }
        }
    }
    // Leave the terminal connected to home when done.
    ns.singularity.connect("home");
}
|
||||
|
||||
/**
 * Depth-first enumeration of every key path in a nested object tree.
 * @param {Object} obj - nested plain-object tree.
 * @param {string[]} [path] - prefix accumulated during recursion.
 * @returns {string[][]} every path, parents listed before their descendants.
 */
function getPaths(obj, path = []) {
    return Object.keys(obj).flatMap((key) => {
        const extended = [...path, key];
        const child = obj[key];
        // Recurse only into non-null objects; leaves contribute just their own path.
        const deeper = child !== null && typeof child === 'object' ? getPaths(child, extended) : [];
        return [extended, ...deeper];
    });
}
|
||||
|
||||
/**
 * Scans the entire network reachable from "home" and writes the topology to
 * ServerRouteList.txt as a nested object ({ home: { child: { grandchild: {} }}}).
 * @param {NS} ns
 */
async function scanRecursiveWrapper(ns) {
    // Start from a clean slate; the file is fully regenerated below.
    ns.rm("ServerRouteList.txt");
    const home = "home";
    let serverRouteList = { home: {} };
    let knownServers = [];   // every hostname seen so far (dedupe)
    let unscanned = [];      // work stack (pop => depth-first order)
    unscanned.push(home);
    knownServers.push(home);
    while (unscanned.length > 0) {
        let currentServer = unscanned.pop();
        // Only neighbours we haven't visited yet count as children of this node.
        let currentChildren = ns.scan(currentServer).filter(element => !knownServers.includes(element));
        knownServers = knownServers.concat(currentChildren);
        // Locate this server inside the tree built so far ('*'-joined path),
        // then graft its children object in at that position.
        let keyPath = findKeyPath(serverRouteList, currentServer);
        let childrenObject = currentChildren.reduce((a, v) => ({ ...a, [v]: {} }), {});
        writeValueToPath(serverRouteList, keyPath, childrenObject);
        for (let i = 0; i < currentChildren.length; i++) {
            let child = currentChildren[i];
            unscanned.push(child);
        }
    }
    ns.write("ServerRouteList.txt", JSON.stringify(serverRouteList), "w");
}
|
||||
|
||||
/**
 * Finds `key` anywhere in a nested object and returns its '*'-joined path
 * from the root (e.g. "home*n1*n2"), or null if absent.
 * @param {*} json - nested plain-object tree (non-objects terminate recursion).
 * @param {string} key - key to locate.
 * @returns {?string} path string, or null when the key is not found.
 */
function findKeyPath(json, key) {
    if (json === null || typeof json !== 'object') {
        return null;
    }
    // Direct hit at this level ends the search immediately.
    if (key in json) {
        return key;
    }
    // Otherwise descend into each own property and prepend it to the tail path.
    for (const [prop, child] of Object.entries(json)) {
        const tail = findKeyPath(child, key);
        if (tail !== null) {
            return prop + '*' + tail;
        }
    }
    return null;
}
|
||||
|
||||
/**
 * Writes `value` into a nested object at the '*'-joined `path`, creating any
 * missing intermediate objects along the way. Mutates `json` in place.
 * @param {Object} json - tree to mutate.
 * @param {string} path - e.g. "home*n1*n2".
 * @param {*} value - value stored at the final key.
 */
function writeValueToPath(json, path, value) {
    const keys = path.split('*');
    const leaf = keys.pop();
    let node = json;
    // Walk (and lazily create) every intermediate level before the leaf.
    for (const key of keys) {
        if (node[key] === undefined) {
            node[key] = {};
        }
        node = node[key];
    }
    node[leaf] = value;
}
|
||||
14
local/path/home/CorpControl.js
Normal file
14
local/path/home/CorpControl.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/**
 * Runs the corp helper scripts (/corp/Smart.js and /corp/UpgradeOffice.js)
 * once per city for the AgraNeo corporation, pausing 1s between cities.
 * @param {NS} ns
 */
export async function main(ns) {
    let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
    let corpName = "AgraNeo";
    //ns.tprint(ns.corporation.getConstants())
    //ns.corporation.getMaterial();
    //ns.corporation.buyMaterial();
    for (let city of cities) {
        // Idiom fix: ns.run and ns.tprint are synchronous (run returns a PID);
        // awaiting them was misleading no-op noise. Only ns.sleep is awaited.
        ns.run("/corp/Smart.js", 1, corpName, city);
        ns.tprint(ns.run("/corp/UpgradeOffice.js", 1, corpName, city));
        await ns.sleep(1000);
    }
}
|
||||
8
local/path/home/CrackAndRootAll.js
Normal file
8
local/path/home/CrackAndRootAll.js
Normal file
@@ -0,0 +1,8 @@
|
||||
|
||||
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript } from "/RamsesUtils.js";
|
||||
/**
 * Entry point: collect whichever port-opening programs exist on home, then
 * crack and nuke every reachable server (no script deployment).
 * @param {NS} ns
 */
export async function main(ns) {
    const cracks = getCracks(ns);
    // Empty script name + copy=false: root only, deploy nothing.
    crackingAndRooting(ns, cracks, "", false);
}
|
||||
6
local/path/home/FactionBoost.js
Normal file
6
local/path/home/FactionBoost.js
Normal file
@@ -0,0 +1,6 @@
|
||||
/**
 * Donates this script's RAM to the player's faction indefinitely via ns.share.
 * Never returns; kill the script to stop sharing.
 * @param {NS} ns
 */
export async function main(ns) {
    for (;;) {
        await ns.share();
    }
}
|
||||
16
local/path/home/FactionRAMShare.js
Normal file
16
local/path/home/FactionRAMShare.js
Normal file
@@ -0,0 +1,16 @@
|
||||
/**
 * Fills every rooted server from serverList.txt with FactionBoost.js threads
 * (ns.share) to boost faction reputation gain. Kills whatever is already
 * running on each server first to free the RAM.
 * @param {NS} ns
 */
export async function main(ns) {
    let serverList = JSON.parse(ns.read("serverList.txt"));
    for (const [name, entry] of Object.entries(serverList)) {
        if (entry.rootAccess === true) {
            // Clear out existing scripts so we can claim all the RAM.
            ns.print("killed threads on: " + name + " " + ns.killall(name, true));
            //move script and run
            let maxRam = ns.getServerMaxRam(name);
            if (maxRam > 0) {
                ns.scp("FactionBoost.js", name, "home");
                // One share thread costs 4 GB of RAM.
                let maxProcesses = Math.floor(maxRam / 4);
                // Keep ~200 GB (50 threads' worth) free on home for other scripts.
                if (name === "home") maxProcesses = maxProcesses - 50;
                // Bugfix: ns.exec errors on a non-positive thread count, which
                // happened for servers under 4 GB and for a small home server
                // after the 50-thread reservation. Skip those instead.
                if (maxProcesses > 0) {
                    ns.exec("FactionBoost.js", name, maxProcesses);
                }
            }
        }
    }
}
|
||||
70
local/path/home/RMbreach.js
Normal file
70
local/path/home/RMbreach.js
Normal file
@@ -0,0 +1,70 @@
|
||||
/**
 * Weakens ns.args[0] down to its minimum security level ("breach"), then
 * launches the RMcontroller.js batcher against it and positions its tail window.
 * All worker threads run from "home".
 * @param {NS} ns
 */
export async function main(ns) {
    //args
    const sTarget = ns.args[0]; // target server

    // declare objects
    const oHome = ns.getServer("home");
    //const oTarget = ns.getServer(sTarget);

    //declare variables
    const sWeakenScript = "RMweaken.js";
    const sBatch = "RMcontroller.js";

    const nCores = oHome.cpuCores;
    let nSecurity = ns.getServerSecurityLevel(sTarget);
    const nMinSecurity = ns.getServerMinSecurityLevel(sTarget);
    // Security reduction of a single weaken thread on home's cores.
    const nWeakenSTR = ns.weakenAnalyze(1, nCores);
    // Threads needed to drop from current to minimum security in one pass.
    let nThreads = Math.ceil((nSecurity - nMinSecurity) / nWeakenSTR);
    // NOTE(review): free RAM is sampled once here and never refreshed, even in
    // the chunked loop below — confirm that's acceptable.
    let nFreeRam = ns.getServerMaxRam("home") - ns.getServerUsedRam("home");
    ns.tail("RMbreach.js", "home", sTarget);
    //ns.resizeTail(815, 395);
    //ns.moveTail(1925, 0);

    // crack target
    // ns.run(sCrack, 1, sTarget);

    if (nThreads > 0 && nSecurity > nMinSecurity) {
        const nDelay = ns.getWeakenTime(sTarget);

        ns.tprint("current security is: " + nSecurity);
        ns.tprint("minimum security is: " + nMinSecurity);
        ns.tprint("threads needed for weaken: " + nThreads);
        ns.tprint(nThreads + " will reduce Security by " + ns.weakenAnalyze(nThreads, nCores));
        let nScriptRAM = ns.getScriptRam(sWeakenScript, "home");
        let nRequiredRAM = nScriptRAM * nThreads;
        ns.tprint(nThreads + " of " + sWeakenScript + " requires " + nRequiredRAM + " GB of RAM");
        ns.tprint("weakening will take " + (nDelay / 1000 / 60) + " minutes");

        if (nFreeRam > nRequiredRAM) {
            // Everything fits: single full-strength weaken, then wait it out.
            ns.run(sWeakenScript, nThreads, sTarget);

            await ns.sleep(Math.ceil(nDelay));

            nSecurity = ns.getServerSecurityLevel(sTarget);
            ns.tprint("Breach complete, security level is now at: " + nSecurity);

        }
        else {
            ns.print("not enough RAM to run all threads at once, splitting into smaller chunks...");
            while (nSecurity > nMinSecurity) {
                // Heuristic shrink: divide threads by (1 + required/free) each
                // pass so the chunk size converges toward what fits in RAM.
                // NOTE(review): this can still exceed free RAM on early passes
                // and nDelay is not recomputed per pass — confirm intended.
                nThreads /= (1+(nRequiredRAM / nFreeRam));
                ns.print(Math.ceil(nRequiredRAM / nFreeRam));
                ns.print(nThreads);
                ns.print(nThreads * nScriptRAM);
                ns.run(sWeakenScript, Math.ceil(nThreads), sTarget);
                await ns.sleep(Math.ceil(nDelay));
                nSecurity = ns.getServerSecurityLevel(sTarget);
            }

        }

    }

    //run batch
    const nBatchPID = ns.run(sBatch, 1, sTarget);
    ns.tail(nBatchPID, "home", sBatch, 1, sTarget);
    ns.resizeTail(815, 395, nBatchPID);
    ns.moveTail(1925, 0, nBatchPID);

}
|
||||
126
local/path/home/RMcontroller.js
Normal file
126
local/path/home/RMcontroller.js
Normal file
@@ -0,0 +1,126 @@
|
||||
/**
 * HWGW proto-batch controller: repeatedly launches one fixed-ratio batch
 * (25 hack / 1 weaken / 13 grow / 1 weaken threads per batch unit, scaled by
 * how many units fit in home's free RAM) against ns.args[0], then sleeps one
 * full weaken cycle before the next round. All workers run on "home".
 * @param {NS} ns
 */
export async function main(ns) {
    //Arguments
    const sTarget = ns.args[0]; // target server

    ns.tail("RMcontroller.js", "home", sTarget);

    //Settings
    const oHome = ns.getServer("home");
    const nCores = oHome.cpuCores;
    const sScript = ns.getScriptName();
    const sWeaken = "RMweaken.js";
    const sGrow = "RMgrow.js";
    const sHack = "RMhack.js";
    const nScriptRAM = ns.getScriptRam(sScript, "home");
    const nWeakenRAM = ns.getScriptRam(sWeaken, "home");
    const nGrowRAM = ns.getScriptRam(sGrow, "home");
    const nHackRAM = ns.getScriptRam(sHack, "home");
    const nHomeUsedRAM = ns.getServerUsedRam("home");
    const nHomeMaxRAM = ns.getServerMaxRam("home");
    // Sampled once at startup; later batches assume this stays accurate.
    let nHomeFreeRAM = nHomeMaxRAM - nHomeUsedRAM;

    // Per-phase landing offsets in ms: hack, weaken1, grow, weaken2.
    const nDelays = [0, 20, 40, 60];


    //abort script if sTarget is undefined
    if (sTarget === undefined) {
        ns.tprint("1st arg sTarget is undefined");
        return false;
    }

    //target server info
    const nMinSecurity = ns.getServerMinSecurityLevel(sTarget);
    const nMaxMoney = ns.getServerMaxMoney(sTarget);

    // Phase durations derived from weaken time; in Bitburner grow is 0.8x and
    // hack 0.25x of weaken time, so the ratios below mirror the game formulas.
    let nWeakenTime1 = ns.getWeakenTime(sTarget);
    let nWeakenTime2 = nWeakenTime1;
    let nGrowTime = nWeakenTime1 * 0.8;
    let nHackTime = nWeakenTime1 / 4;

    //let nHackSecurityGain = ns.hackAnalyzeSecurity(1, sTarget);
    //let nHackSecurityGain = 0.002;
    //let nHackThreadsEstimate = Math.max(Math.floor(1 / nHackSecurityGain),1);
    //let nHackThreadsEstimate = 10;
    //ns.tprint("nHackSecurityGain = " + nHackSecurityGain);
    //ns.tprint("nHackThreadsEstimate = " + nHackThreadsEstimate);
    // Fixed ratio: 25 hack threads per batch unit.
    const nHackTotalRAM = nHackRAM * 25;

    //let nGrowSecurityGain = ns.growthAnalyzeSecurity(1, sTarget, nCores);
    //let nGrowSecurityGain = 0.004;
    //let nGrowThreadsEstimate = Math.max(Math.floor(1 / nGrowSecurityGain),1);
    //ns.tprint("nGrowSecurityGain = " + nGrowSecurityGain);
    //ns.tprint("nGrowThreadsEstimate = " + nGrowThreadsEstimate);
    // Fixed ratio: 13 grow threads per batch unit.
    const nGrowTotalRAM = nGrowRAM * 13;

    //let nWeakenSecurity = ns.weakenAnalyze(1, nCores);
    //let nWeakenSecurity = 0.05;
    //let nWeakenThreadsEstimate = Math.max(Math.ceil(1 / nWeakenSecurity),1);
    //ns.tprint("nWeakenSecurity = " + nWeakenSecurity);
    //ns.tprint("nWeakenThreadsEstimate = " + nWeakenThreadsEstimate);
    // Fixed ratio: 1 weaken thread per weaken phase per batch unit.
    const nWeakenTotalRAM = nWeakenRAM * 1;

    // RAM of one full 25/1/13/1 batch unit; batches = how many units fit in
    // free RAM after reserving this controller's own footprint.
    const nTotalRAM = nHackTotalRAM + nGrowTotalRAM + (nWeakenTotalRAM * 2)
    const nTotalBatches = Math.floor((nHomeFreeRAM - nScriptRAM) / nTotalRAM);

    // All batch units are launched as one big exec per phase.
    let nHackThreadsEstimate = nTotalBatches * 25;
    let nWeakenThreadsEstimate1 = nTotalBatches * 1;
    let nGrowThreadsEstimate = nTotalBatches * 13;
    let nWeakenThreadsEstimate2 = nTotalBatches * 1;

    ns.tprint("RAM per Cycle = " + nTotalRAM);
    ns.tprint("how many batches can i run at the same time? = " + nTotalBatches);

    //await ns.grow(server, { additionalMsec: nMsecDelay });
    // additionalMsec padding so every phase finishes near the weaken end time.
    let nGrowDelay = nWeakenTime1 - nGrowTime;
    let nHackDelay = nWeakenTime1 - nHackTime;

    const nCycleDuration = nWeakenTime2 + nDelays[3];
    ns.tprint("nCycleDuration = " + nCycleDuration);

    // NOTE(review): nBatchFrequency is computed and printed but never used.
    const nBatchFrequency = Math.ceil(nCycleDuration / nTotalBatches);
    ns.tprint("nBatchFrequency = " + nBatchFrequency);

    while (true) {

        //server stats
        let nCurrentSecurity = ns.getServerSecurityLevel(sTarget);
        let nCurrentMoney = ns.getServerMoneyAvailable(sTarget);

        //timestamp
        let currentDate = new Date();
        let nOffset;

        ns.print("Cash: " + (Math.floor(nCurrentMoney * 1000) / 1000) + " / " + nMaxMoney);
        ns.print("Security: " + (Math.floor(nCurrentSecurity * 1000) / 1000) + " / " + nMinSecurity);

        //Calculate estimate time of completion
        // Re-sample weaken time each cycle (security drift changes it), plus
        // the final phase offset and a 1s safety buffer.
        nOffset = ns.getWeakenTime(sTarget);
        let nSafeTime = nOffset + nDelays[3]+1000;
        let nWeakTime = new Date(currentDate.getTime() + nSafeTime);
        let sWeakTime = nWeakTime.toLocaleTimeString('sw-SV'); //swedish time

        //Print estimated time of completion
        ns.print("Weakening " + sTarget + " Estimated complete at " + sWeakTime);

        //hack
        // Worker args: target, repeat-flag (false = one shot), additionalMsec.
        const nHackPID = ns.exec(sHack, "home", nHackThreadsEstimate, sTarget, false, nHackDelay + nDelays[0]);
        //ns.tail(nHackPID, "home", "home", nHackThreadsEstimate, sTarget, 0, nHackDelay + nDelays[0]);

        //weaken 1
        const nWeakenPID = ns.exec(sWeaken, "home", nWeakenThreadsEstimate1, sTarget, false, nDelays[1]);
        //ns.tail(nWeakenPID, "home", "home", nWeakenThreadsEstimate, sTarget, 0, nDelays[1]);

        //grow
        const nGrowPID = ns.exec(sGrow, "home", nGrowThreadsEstimate, sTarget, false, nGrowDelay + nDelays[2]);
        //ns.tail(nGrowPID, "home", "home", nGrowThreadsEstimate, sTarget, 0, nGrowDelay + nDelays[2]);

        //weaken 2
        const nWeakenPID2 = ns.exec(sWeaken, "home", nWeakenThreadsEstimate2, sTarget, false, nDelays[3]);
        //ns.tail(nWeakenPID2, "home", "home", nWeakenThreadsEstimate, sTarget, 0, nDelays[3]);

        // Wait for the whole batch to land before launching the next one.
        await ns.sleep(nSafeTime);

    }

}
|
||||
14
local/path/home/RMgrow.js
Normal file
14
local/path/home/RMgrow.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/**
 * Grow worker: grows args[0] once, or forever when args[1] is exactly true.
 * args[2] is passed through as additionalMsec so the controller can time
 * when the grow lands. Logs a timestamp before and after.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.print(Date.now());
    const sTarget = ns.args[0]; // target server
    const bRepeat = ns.args[1]; // should this script loop
    const nMsecDelay = ns.args[2]; // MsecDelay

    // Run at least once; keep going only while the repeat flag is strictly true.
    do {
        await ns.grow(sTarget, { additionalMsec: nMsecDelay });
    } while (bRepeat === true);
    ns.print(Date.now());
}
|
||||
14
local/path/home/RMhack.js
Normal file
14
local/path/home/RMhack.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/**
 * Hack worker: hacks args[0] once, or forever when args[1] is exactly true.
 * args[2] is passed through as additionalMsec so the controller can time
 * when the hack lands. Logs a timestamp before and after.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.print(Date.now());
    const sTarget = ns.args[0]; // target server
    const bRepeat = ns.args[1]; // should this script loop
    const nMsecDelay = ns.args[2]; // MsecDelay

    // Run at least once; keep going only while the repeat flag is strictly true.
    do {
        await ns.hack(sTarget, { additionalMsec: nMsecDelay });
    } while (bRepeat === true);
    ns.print(Date.now());
}
|
||||
14
local/path/home/RMweaken.js
Normal file
14
local/path/home/RMweaken.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/**
 * Weaken worker: weakens args[0] once, or forever when args[1] is exactly true.
 * args[2] is passed through as additionalMsec so the controller can time
 * when the weaken lands. Logs a timestamp before and after.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.print(Date.now());
    const sTarget = ns.args[0]; // target server
    const bRepeat = ns.args[1]; // should this script loop
    const nMsecDelay = ns.args[2]; // MsecDelay

    // Run at least once; keep going only while the repeat flag is strictly true.
    do {
        await ns.weaken(sTarget, { additionalMsec: nMsecDelay });
    } while (bRepeat === true);
    ns.print(Date.now());
}
|
||||
5
local/path/home/Ramses-grow.js
Normal file
5
local/path/home/Ramses-grow.js
Normal file
@@ -0,0 +1,5 @@
|
||||
/**
 * Minimal one-shot worker: grows the server named in args[0] a single time.
 * @param {NS} ns
 */
export async function main(ns) {
    const [sServer] = ns.args;
    await ns.grow(sServer);
}
|
||||
5
local/path/home/Ramses-hack.js
Normal file
5
local/path/home/Ramses-hack.js
Normal file
@@ -0,0 +1,5 @@
|
||||
/**
 * Minimal one-shot worker: hacks the server named in args[0] a single time.
 * @param {NS} ns
 */
export async function main(ns) {
    const [sServer] = ns.args;
    await ns.hack(sServer);
}
|
||||
5
local/path/home/Ramses-weaken.js
Normal file
5
local/path/home/Ramses-weaken.js
Normal file
@@ -0,0 +1,5 @@
|
||||
/**
 * Minimal one-shot worker: weakens the server named in args[0] a single time.
 * @param {NS} ns
 */
export async function main(ns) {
    const [sServer] = ns.args;
    await ns.weaken(sServer);
}
|
||||
278
local/path/home/RamsesUtils.js
Normal file
278
local/path/home/RamsesUtils.js
Normal file
@@ -0,0 +1,278 @@
|
||||
/**
 * Library entry point. This file exists only for its exported helpers;
 * running it directly just prints a notice.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.tprint("This is just a function library, it doesn't do anything.");
}
|
||||
|
||||
/**
 * Builds a map of every port-opening program present on "home" to its NS
 * crack function (e.g. { "BruteSSH.exe": ns.brutessh }).
 * @param {NS} ns
 * @returns {Object<string, Function>} filename -> crack function.
 */
export function getCracks(ns) {
    // Candidate programs in the order the original checks performed them;
    // insertion order matters to callers that index Object.keys(cracks).
    const candidates = [
        ["BruteSSH.exe", ns.brutessh],
        ["FTPCrack.exe", ns.ftpcrack],
        ["relaySMTP.exe", ns.relaysmtp],
        ["HTTPWorm.exe", ns.httpworm],
        ["SQLInject.exe", ns.sqlinject],
    ];
    const cracks = {};
    for (const [program, crackFn] of candidates) {
        if (ns.fileExists(program, "home")) {
            cracks[program] = crackFn;
        }
    }
    return cracks;
}
|
||||
|
||||
/**
 * Scans every server reachable from "home", records stats for each
 * (RAM, ports, security, money, files, a targeting score), and overwrites
 * serverList.txt with the resulting map keyed by hostname.
 * @param {NS} ns
 */
export function scanServerList(ns) {
    const home = "home";
    let serverList = {};
    let unscanned = [];   // work stack; pop() makes the walk depth-first
    unscanned.push(home);
    while (unscanned.length > 0) {
        let currentServer = unscanned.pop();
        // Skip servers we've already profiled (the graph has cycles via scan()).
        if (!serverList[currentServer]) {
            let maxRam = ns.getServerMaxRam(currentServer);
            let minPorts = ns.getServerNumPortsRequired(currentServer);
            let minSecLevel = ns.getServerMinSecurityLevel(currentServer);
            let minHackLevel = ns.getServerRequiredHackingLevel(currentServer);
            let rootAccess = ns.hasRootAccess(currentServer);
            let serverMoney = ns.getServerMaxMoney(currentServer);
            let serverFiles = ns.ls(currentServer);
            // Heuristic difficulty score relative to the player's skill;
            // higher = harder. Constants appear hand-tuned.
            let skillFactor = (2.5 * minHackLevel * minSecLevel + 500) / (ns.getHackingLevel() + 50);
            // Money-per-difficulty ranking used by findBestTarget.
            // NOTE(review): 10e7 is 1e8 — confirm that scale was intended.
            let compareTimeFactor = serverMoney / skillFactor / 10e7;
            serverList[currentServer] =
            {
                serverName: currentServer,
                maxRam: maxRam,
                maxMoney: serverMoney,
                minSec: minSecLevel,
                minPorts: minPorts,
                minHackLvl: minHackLevel,
                rootAccess: rootAccess,
                factorMoneyPerTime: compareTimeFactor,
                // Crack progress, updated later by crackingAndRooting.
                openPorts: 0,
                serverFiles: serverFiles,
            };
            // Queue unprofiled neighbours for scanning.
            let neighbours = ns.scan(currentServer);
            for (let i = 0; i < neighbours.length; i++) {
                let neighbour = neighbours[i];
                if (serverList[neighbour]) {
                    continue
                }
                unscanned.push(neighbour);
            }
        }
    }
    ns.write("serverList.txt", JSON.stringify(serverList), "w");
}
|
||||
|
||||
/**
 * Picks the hackable server with the highest factorMoneyPerTime score from
 * serverList.txt (subject to security/port/skill caps) and writes its entry
 * to bestTarget.txt. A non-empty manualTargetOverride wins unconditionally.
 * @param {NS} ns
 * @param {number} maxSec - maximum acceptable minimum-security level.
 * @param {number} maxPorts - maximum required ports we can open.
 * @param {number} currentHackLevel - player hacking level; targets must require less.
 * @param {string} manualTargetOverride - hostname to force, or "" for automatic.
 */
export function findBestTarget(ns, maxSec, maxPorts, currentHackLevel, manualTargetOverride) {
    // Bugfix: scanServerList requires the ns handle; it was previously called
    // with no arguments and would crash when the file was missing.
    if (!ns.fileExists("serverList.txt", "home")) scanServerList(ns);
    let serverList = JSON.parse(ns.read("serverList.txt"));
    let bestEntry = null;
    let compareTime = 0;
    for (const [name, entry] of Object.entries(serverList)) {
        // Only consider servers within our limits; rank by the precomputed score.
        if (entry.minSec <= maxSec && entry.minPorts <= maxPorts && entry.minHackLvl < currentHackLevel) {
            if (entry.factorMoneyPerTime > compareTime) {
                compareTime = entry.factorMoneyPerTime;
                bestEntry = name;
            }
        }
    }
    if (manualTargetOverride.length > 0) {
        bestEntry = manualTargetOverride;
    }
    ns.write("bestTarget.txt", JSON.stringify(serverList[bestEntry]), "w");
}
|
||||
|
||||
|
||||
/**
 * Opens ports with every available crack program, nukes any server it
 * managed to crack, and optionally deploys funnyScript there. Progress
 * (openPorts, rootAccess) is persisted back to serverList.txt.
 * @param {NS} ns
 * @param {Object<string, Function>} cracks - filename -> crack fn (see getCracks).
 * @param {string|string[]} funnyScript - script to deploy via copyAndRunScript.
 * @param {boolean} copy - when true, deploy funnyScript to newly rooted servers.
 */
export function crackingAndRooting(ns, cracks, funnyScript, copy) {
    // Bugfix: scanServerList requires the ns handle; it was previously called
    // with no arguments and would crash when the file was missing.
    if (!ns.fileExists("serverList.txt", "home")) scanServerList(ns);
    let serverList = JSON.parse(ns.read("serverList.txt"));
    for (const [name, entry] of Object.entries(serverList)) {
        let cracked = false;
        let openPorts = serverList[name].openPorts || 0;
        // Attempt a crack when no ports are needed, or when we have enough
        // crack programs to open all required ports and haven't yet.
        if (entry.minPorts === 0 || (entry.minPorts > openPorts && entry.minPorts <= Object.keys(cracks).length)) {
            for (let k = 0; k < entry.minPorts; k++) {
                cracks[Object.keys(cracks)[k]](name);
                // Bugfix (off-by-one): after iteration k, k + 1 ports are open;
                // this previously recorded k, understating progress by one.
                serverList[name].openPorts = k + 1;
            }
            cracked = true;
        }
        if (!ns.hasRootAccess(name) && cracked === true) {
            ns.nuke(name);
            if (ns.hasRootAccess(name)) {
                serverList[name].rootAccess = true;
                if (serverList[name].maxRam > 0 && copy === true) {
                    copyAndRunScript(ns, funnyScript, name);
                }
            }
        }
        // Persist after every server so a crash loses at most one update.
        ns.write("serverList.txt", JSON.stringify(serverList), "w");
    }
    ns.tprint("Cracking and rooting done");
}
|
||||
|
||||
|
||||
/**
 * Copies funnyScript to currentServer and launches one single-threaded
 * instance per 8 GB of that server's RAM, all aimed at the best target
 * recorded in bestTarget.txt. Requires root on both the host and the target.
 * @param {NS} ns
 * @param {string[]} funnyScript - script file(s); scp takes the whole list,
 *        exec runs funnyScript[0]. NOTE(review): callers sometimes pass "" —
 *        confirm the array indexing is safe for those call sites.
 * @param {string} currentServer - host to deploy onto.
 */
export function copyAndRunScript(ns, funnyScript, currentServer) {
    // change to run for one specific server with bestTarget from file
    //let minRam = ns.getScriptRam(funnyScript);
    let bestTarget = JSON.parse(ns.read("bestTarget.txt"));

    let name = currentServer;
    let serverList = JSON.parse(ns.read("serverList.txt"));
    ns.print(name);
    if (serverList[name].rootAccess === true && serverList[bestTarget.serverName].rootAccess === true) {
        // Free up RAM first: wipe everything on remote hosts, but on home only
        // kill prior copies of this script (so other home scripts survive).
        if (name !== "home") {
            ns.print("killed threads on: " + name + ns.killall(name, true));
        } else {
            ns.print("killed threads on: " + name + ns.scriptKill(funnyScript[0], name));
        };
        //move script and run
        if (serverList[name].maxRam > 0) {
            ns.scp(funnyScript, name, "home");
            let maxProcesses = 1;
            // One process per 8 GB of RAM; servers below 8 GB get exactly one.
            if (serverList[name].maxRam >= 8) {
                maxProcesses = Math.max(Math.floor((serverList[name].maxRam) / 8), 1);
            } else {
                maxProcesses = 1
            };

            for (let n = 1; n <= maxProcesses; n++) {
                ns.exec(funnyScript[0], name, 1, bestTarget.serverName);
            }

            // Earlier thread-based deployment strategy, kept for reference.
            /*let maxThreads = 0;
            if (name === "home") {
                maxThreads = Math.floor((serverList[name].maxRam - ns.getServerUsedRam(name) - 32) / minRam);
                ns.print(name + " " + maxThreads);
            } else {
                ns.print(name);
                maxThreads = Math.floor(serverList[name].maxRam / minRam);
                ns.print(name + " " + maxThreads);
            };
            while (maxThreads > 0) {
                let threadsToAssign = maxThreads < 500 ? maxThreads : 500;
                if (ns.exec(funnyScript, name, threadsToAssign, bestTarget.serverName, serverList[bestTarget.serverName].minSec, serverList[bestTarget.serverName].maxMoney, JSON.stringify(serverList[bestTarget.serverName])) !== 0) {
                    ns.print("Executing script on: " + name + " with: " + threadsToAssign + " threads out of " + maxThreads + " total threads");
                    maxThreads = maxThreads - threadsToAssign;
                } else {
                    ns.tprint("Error running script on: " + name);
                    maxThreads = -1;
                };
            }*/
        }
    }
}
|
||||
|
||||
/**
 * Phase 1: buys 16 GB purchased servers until the game's limit is reached.
 * Phase 2: walks RAM tiers 2^5..2^20, upgrading every purchased server at
 * each tier as money allows. Every purchase/upgrade is mirrored into
 * serverList.txt so targeting scripts see the new RAM.
 * @param {NS} ns
 */
export async function purchaseAndUpgradeServers(ns) {
    ns.disableLog("sleep");
    ns.disableLog("getServerMoneyAvailable");
    ns.disableLog("getServerMaxRam");
    let maxPurchasedServers = ns.getPurchasedServerLimit();
    let purchasedServers = [];
    let count = listPurchasedServers(ns).length;
    let currentMoney = 0;
    let serverList = {};
    // Phase 1: buy until we hit the purchase limit.
    while (count < maxPurchasedServers) {
        purchasedServers = listPurchasedServers(ns);
        currentMoney = ns.getServerMoneyAvailable("home");
        let targetRamInitial = 16;
        if (ns.getPurchasedServerCost(targetRamInitial) < currentMoney) {
            // Consistency fix: use targetRamInitial instead of a second
            // hard-coded 16, so changing the initial size needs one edit.
            let hostname = ns.purchaseServer("pserv-" + purchasedServers.length, targetRamInitial);
            // Robustness fix: purchaseServer returns "" on failure; previously
            // that recorded a bogus "" entry in serverList.txt.
            if (hostname === "") {
                await ns.sleep(5000);
                continue
            }
            count = listPurchasedServers(ns).length;
            serverList = JSON.parse(ns.read("serverList.txt"));
            // Synthetic entry marking the new box as rooted, free RAM with no money.
            serverList[hostname] = {
                serverName: hostname,
                maxRam: targetRamInitial,
                maxMoney: 0,
                minSec: 0,
                minPorts: 5,
                minHackLvl: 1,
                rootAccess: true,
                factorMoneyPerTime: 99999999,
                openPorts: 0,
            };
            ns.write("serverList.txt", JSON.stringify(serverList), "w");
            continue
        } else {
            // Can't afford one yet; wait for income.
            await ns.sleep(5000);
        }
    }

    // Phase 2: upgrade every pserv tier by tier up to 2^20 GB.
    let i = 5;
    while (i < 21) {
        let targetRam = 2 ** i;
        purchasedServers = listPurchasedServers(ns);
        for (let currentServer of purchasedServers) {
            currentMoney = ns.getServerMoneyAvailable("home");
            if (ns.getServerMaxRam(currentServer) < targetRam && ns.getPurchasedServerUpgradeCost(currentServer, targetRam) < currentMoney) {
                if (ns.upgradePurchasedServer(currentServer, targetRam)) {
                    ns.print(currentServer + " upgraded to " + targetRam + " GB RAM");
                    serverList = JSON.parse(ns.read("serverList.txt"));
                    serverList[currentServer].maxRam = targetRam;
                    ns.write("serverList.txt", JSON.stringify(serverList), "w");
                }
            } else {
                // Already at/above this tier or unaffordable; pause before the
                // next server to avoid a tight money-polling loop.
                await ns.sleep(5000);
                continue
            };
        }
        ++i;
    }
    // Message fix: corrected the "Extiting" typo in the completion notice.
    ns.tprint("Exiting purchaseServers script!")
}
|
||||
|
||||
/**
 * Returns the hostnames of all purchased servers. Thin wrapper around
 * ns.getPurchasedServers(), kept as a seam so callers could later be
 * redirected to a cached or filtered list without touching call sites.
 * @param {NS} ns
 * @returns {string[]} purchased-server hostnames.
 */
function listPurchasedServers(ns) {
    return ns.getPurchasedServers();
}
|
||||
|
||||
/**
 * Starts S2controller.js on home, then copies the S2 worker toolkit to every
 * purchased server and launches a controller there too, reporting each PID.
 * @param {NS} ns
 */
export async function runControllerOnPserv(ns) {
    const homePid = ns.exec("S2controller.js", "home");
    ns.tprint("Started S2controller.js on " + "home" + " with PID " + homePid)
    for (const pserv of listPurchasedServers(ns)) {
        // Ship the controller plus its worker scripts before launching.
        ns.scp(["S2tGrow.js", "S2tWeaken.js", "S2tHack.js", "S2controller.js", "S2utils.js"], pserv, "home");
        const pid = ns.exec("S2controller.js", pserv);
        // PID 0 means the exec failed (e.g. not enough RAM); only report successes.
        if (pid > 0) {
            ns.tprint("Started S2controller.js on " + pserv + " with PID " + pid)
        }
    }
}
|
||||
|
||||
/**
 * Daemon: whenever the player's hacking level rises, installs backdoors on
 * every serverList.txt entry that is now within skill range and not yet
 * backdoored, persisting progress back to the file. Polls every 30s otherwise.
 * Runs forever.
 * @param {NS} ns
 */
export async function backdoor(ns) {
    let serverList = JSON.parse(ns.read("serverList.txt"));
    let lasthackingLevel = 0;
    let currentHackingLevel = 0;
    while (true) {
        currentHackingLevel = ns.getHackingLevel();
        // Only re-sweep the list when skill actually increased.
        if (currentHackingLevel > lasthackingLevel) {
            lasthackingLevel = currentHackingLevel;
            for (const [name, entry] of Object.entries(serverList)) {
                if (entry.minHackLvl <= lasthackingLevel && entry.hasBackdoor !== true) {
                    // NOTE(review): singularity.connect(name) only succeeds for
                    // servers adjacent to the current one or already backdoored —
                    // confirm deep servers are reachable from home here.
                    ns.singularity.connect(name);
                    // installBackdoor operates on the currently connected server.
                    await ns.singularity.installBackdoor();
                    ns.singularity.connect("home");
                    serverList[name].hasBackdoor = true;
                    ns.tprint("Backdoor on: " + name);
                }
            }
            ns.write("serverList.txt", JSON.stringify(serverList), "w");
        } else {
            await ns.sleep(30000)
        };

    }
}
|
||||
297
local/path/home/S2controller.js
Normal file
297
local/path/home/S2controller.js
Normal file
@@ -0,0 +1,297 @@
|
||||
/*
|
||||
Welcome to part 2. I'll only be commenting on things that have changed from the previous part, so if there's something
|
||||
confusing, be sure to go back and look at part 1 for more detailed explanations.
|
||||
|
||||
For part 2, we'll be making a protobatcher. Essentially that means we'll be running our previous version in a constant loop.
|
||||
To facilitate this, and because otherwise there wouldn't really be much to this part, we're going to refine the way our
|
||||
scripts communicate with each other using ports.
|
||||
*/
|
||||
|
||||
import { getServers, copyScripts, checkTarget, isPrepped, prep } from "/S2utils.js";
|
||||
|
||||
// Batch phases in launch order; each lands `spacer` ms after the previous.
const TYPES = ["hack", "weaken1", "grow", "weaken2"];
// Worker scripts copied to every attacker server.
const WORKERS = ["S2tHack.js", "S2tWeaken.js", "S2tGrow.js"];
// Phase -> worker script (both weaken phases share the same script).
const SCRIPTS = { hack: "S2tHack.js", weaken1: "S2tWeaken.js", grow: "S2tGrow.js", weaken2: "S2tWeaken.js" };
// Phase -> RAM cost per thread in GB.
const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 };
// Phase -> how many spacers after the batch's base end time it should finish.
const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 };
|
||||
|
||||
/*
|
||||
Most of the changes are in the main function, so I've moved it up top. I generally prefer having the main function at the
|
||||
top of the file anyway.
|
||||
*/
|
||||
/**
 * Protobatch controller: in an endless loop, picks the best target, preps it,
 * sizes one HWGW batch via optimizeBatch, deploys the four workers with
 * port-based late-start compensation, and waits for weaken2 to report back
 * before starting the next batch.
 * @param {NS} ns
 */
export async function main(ns) {
    await ns.sleep(500);
    // Moving most of our active feeback to the tail window so that batches finishing don't get swept away.
    ns.disableLog("ALL");
    ns.tail();

    // Stick the whole script in a loop. That's it, see you in part 3.
    // Just kidding, there's a bit more to it.
    let batchCount = 0;
    while (true) {
        // Register a port using the script's unique handle.
        // I like to keep ports strictly coupled to a specific script, but you can use whatever number you like.
        const dataPort = ns.getPortHandle(ns.pid);
        dataPort.clear() // Make sure there's no random data left in the port.

        // While collecting servers, also pick the best target and push workers out.
        let target = "n00dles";
        const servers = getServers(ns, (server) => {
            // Don't worry if you don't have Formulas, it's not needed at all here.
            target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home"));
            copyScripts(ns, server, WORKERS, true);
            return ns.hasRootAccess(server);
        });
        //target = "n00dles";
        const ramNet = new RamNet(ns, servers);
        const metrics = new Metrics(ns, target);
        if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet);
        optimizeBatch(ns, metrics, ramNet); // The same optimization algorithm works just fine for protobatching.
        metrics.calculate(ns);

        // Build one job per phase and reserve RAM for each on the network.
        const batch = [];
        batchCount++;
        for (const type of TYPES) {
            // We've removed the buffer. You'll see why later.
            metrics.ends[type] = Date.now() + metrics.wTime + metrics.spacer * OFFSETS[type];
            const job = new Job(type, metrics);
            job.batch = batchCount; // This is a bit of a hack. We'll do it better in the next part.
            if (!ramNet.assign(job)) {
                ns.print(`ERROR: Unable to assign ${type}. Dumping debug info:`);
                ns.print(job);
                ns.print(metrics);
                ramNet.printBlocks(ns);
                return;
            }
            batch.push(job);
        }

        // We do a bit more during deployment now.
        for (const job of batch) {
            // Shift this job's deadline by the lateness accumulated so far.
            job.end += metrics.delay;
            const jobPid = ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job));
            if (!jobPid) throw new Error(`Unable to deploy ${job.type}`); // If the exec fails for any reason, error out.
            /*
            If a worker deploys late, it will communicate back how late it was, so that the other scripts can adjust.
            Note that for this we use the *worker's* port instead of our controller's port. It's good practice to make
            sure your ports have a very narrow focus.
            */
            const tPort = ns.getPortHandle(jobPid);
            await tPort.nextWrite();
            metrics.delay += tPort.read();
        }

        // Live countdown in the tail log while the batch is in flight.
        const timer = setInterval(() => {
            ns.clearLog();
            ns.print(`Hacking \$${ns.formatNumber(metrics.maxMoney * metrics.greed)} from ${metrics.target}`)
            ns.print(`Running batch: ETA ${ns.tFormat(metrics.ends.weaken2 - Date.now())}`);
        }, 1000);
        // Guarantee the interval is cleaned up even if the script is killed mid-batch.
        ns.atExit(() => {
            clearInterval(timer);
        });
        // Wait for the weaken2 worker to report back. For now I've just hardcoded the Job class to tell only
        // weaken2 to report. This behavior will change later.
        await dataPort.nextWrite();
        dataPort.clear(); // For now we don't actually need the information here, we're just using it for timing.
        clearInterval(timer);
    }
}
|
||||
|
||||
// A lightweight value object describing a single worker task within a batch.
class Job {
    /**
     * @param {string} type - One of "hack", "weaken1", "grow", "weaken2".
     * @param {Metrics} metrics - Precomputed timing/thread data for the target.
     * @param {string} [server="none"] - Host the job will run on (assigned later by RamNet).
     */
    constructor(type, metrics, server = "none") {
        const { ends, times, threads, target, port } = metrics;
        this.type = type;
        this.target = target;
        this.port = port; // This lets the workers know which port to write to.
        this.end = ends[type];
        this.time = times[type];
        this.threads = threads[type];
        this.cost = this.threads * COSTS[type];
        this.server = server;
        // For now, only the final weaken of a batch reports back to the controller.
        this.report = type === "weaken2";
        // Batch number; stamped by the controller after construction.
        this.batch = 0;
    }
}
|
||||
|
||||
/** @param {NS} ns */
|
||||
// Snapshot of a target server's state plus derived batch parameters.
class Metrics {
    /**
     * @param {NS} ns
     * @param {string} server - Hostname of the hacking target.
     */
    constructor(ns, server) {
        // Live server state at construction time.
        this.target = server;
        this.maxMoney = ns.getServerMaxMoney(server);
        this.money = Math.max(ns.getServerMoneyAvailable(server), 1);
        this.minSec = ns.getServerMinSecurityLevel(server);
        this.sec = ns.getServerSecurityLevel(server);
        this.prepped = isPrepped(ns, server);

        // Batch tuning values; calculate() fills most of these in.
        this.chance = 0;
        this.wTime = 0;
        this.delay = 0; // The cumulative delays caused by late jobs.
        this.spacer = 5;
        this.greed = 0.1;
        this.depth = 0; // Still not using this.

        // One slot per job type for timings, end times, and thread counts.
        const perType = () => ({ hack: 0, weaken1: 0, grow: 0, weaken2: 0 });
        this.times = perType();
        this.ends = perType();
        this.threads = perType();

        this.port = ns.pid;
    }

    /**
     * Refreshes server state and recomputes timings and thread counts.
     * @param {NS} ns
     * @param {number} [greed=this.greed] - Fraction of max money to hack per batch.
     */
    calculate(ns, greed = this.greed) {
        const host = this.target;
        const cap = this.maxMoney;
        this.money = ns.getServerMoneyAvailable(host);
        this.sec = ns.getServerSecurityLevel(host);

        // All operation timings derive from weaken time via fixed game ratios.
        const w = ns.getWeakenTime(host);
        this.wTime = w;
        Object.assign(this.times, { hack: w / 4, weaken1: w, grow: w * 0.8, weaken2: w });
        this.depth = w / this.spacer * 4;

        const hPercent = ns.hackAnalyze(host);
        const amount = cap * greed;
        const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(host, amount)), 1);
        const tGreed = hPercent * hThreads; // Actual fraction stolen by the rounded thread count.
        const gThreads = Math.ceil(ns.growthAnalyze(host, cap / (cap - cap * tGreed)));
        Object.assign(this.threads, {
            hack: hThreads,
            // 1 weaken thread (-0.05 sec) offsets 25 hack threads (+0.002 each).
            weaken1: Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1),
            grow: gThreads,
            // 1 weaken thread offsets 12.5 grow threads (+0.004 each).
            weaken2: Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1),
        });
        this.chance = ns.hackAnalyzeChance(host);
    }
}
|
||||
|
||||
/** @param {NS} ns */
|
||||
// A pooled view of free RAM across all rooted servers, used to place worker jobs.
class RamNet {
    #blocks = [];               // { server, ram } entries, sorted small-to-large with "home" last.
    #minBlockSize = Infinity;   // Smallest usable block seen at construction.
    #maxBlockSize = 0;          // Largest usable block seen at construction.
    #totalRam = 0;              // Currently free RAM across the pool.
    #maxRam = 0;                // Total capacity across the pool.
    #prepThreads = 0;           // How many 1.75GB threads the pool could host.
    #index = new Map();         // server name -> position in #blocks.

    /**
     * @param {NS} ns
     * @param {string[]} servers - Candidate hostnames to fold into the pool.
     */
    constructor(ns, servers) {
        for (const hostname of servers) {
            if (!ns.hasRootAccess(hostname)) continue;
            const capacity = ns.getServerMaxRam(hostname);
            const free = capacity - ns.getServerUsedRam(hostname);
            // Anything smaller than one worker script (1.6GB) is useless to us.
            if (free < 1.60) continue;
            this.#blocks.push({ server: hostname, ram: free });
            this.#minBlockSize = Math.min(this.#minBlockSize, free);
            this.#maxBlockSize = Math.max(this.#maxBlockSize, free);
            this.#totalRam += free;
            this.#maxRam += capacity;
            this.#prepThreads += Math.floor(free / 1.75);
        }
        this.#sort();
        let position = 0;
        for (const block of this.#blocks) this.#index.set(block.server, position++);
    }

    // Smallest blocks first so big jobs keep big blocks available; "home" always goes last.
    #sort() {
        this.#blocks.sort((a, b) =>
            a.server === "home" ? 1 : b.server === "home" ? -1 : a.ram - b.ram
        );
    }

    /**
     * @param {string} server
     * @returns {{server: string, ram: number}} The live block for this server.
     * @throws {Error} When the server is not part of the pool.
     */
    getBlock(server) {
        const position = this.#index.get(server);
        if (position === undefined) throw new Error(`Server ${server} not found in RamNet.`);
        return this.#blocks[position];
    }

    get totalRam() {
        return this.#totalRam;
    }

    get maxRam() {
        return this.#maxRam;
    }

    get maxBlockSize() {
        return this.#maxBlockSize;
    }

    get prepThreads() {
        return this.#prepThreads;
    }

    /**
     * Reserves RAM for a job on the first block that fits, recording the host on the job.
     * @param {{cost: number, server: string}} job
     * @returns {boolean} True if the job was placed.
     */
    assign(job) {
        for (const block of this.#blocks) {
            if (block.ram < job.cost) continue;
            job.server = block.server;
            block.ram -= job.cost;
            this.#totalRam -= job.cost;
            return true;
        }
        return false;
    }

    /** Returns a finished job's RAM to the pool. */
    finish(job) {
        const block = this.getBlock(job.server);
        block.ram += job.cost;
        this.#totalRam += job.cost;
    }

    /** Shallow-copies every block so planning code can simulate placements safely. */
    cloneBlocks() {
        return this.#blocks.map(block => ({ ...block }));
    }

    /** Dumps the block list to the script log for debugging. */
    printBlocks(ns) {
        for (const block of this.#blocks) ns.print(block);
    }
}
|
||||
|
||||
/**
|
||||
* @param {NS} ns
|
||||
* @param {Metrics} metrics
|
||||
* @param {RamNet} ramNet
|
||||
*/
|
||||
export function optimizeBatch(ns, metrics, ramNet) {
    const target = metrics.target;
    const maxMoney = metrics.maxMoney;
    const hPercent = ns.hackAnalyze(target);
    // Upper bound on threads any single block could host (grow can't be split).
    const maxThreads = ramNet.maxBlockSize / 1.75;

    const minGreed = 0.001;
    const stepValue = 0.001;

    // Walk greed down from 99% until a full batch fits into the available RAM.
    for (let greed = 0.99; greed > minGreed; greed -= stepValue) {
        const amount = maxMoney * greed;
        const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(target, amount)), 1);
        const tGreed = hPercent * hThreads;
        const gThreads = Math.ceil(ns.growthAnalyze(target, maxMoney / (maxMoney - maxMoney * tGreed)));
        if (Math.max(hThreads, gThreads) > maxThreads) continue;

        // Weaken thread counts that neutralize the hack/grow security increases.
        const wThreads1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
        const wThreads2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
        const threadCosts = [hThreads * 1.7, wThreads1 * 1.75, gThreads * 1.75, wThreads2 * 1.75];

        // Simulate placement on a copy of the RAM pool; each cost must fit whole in one block.
        const pRam = ramNet.cloneBlocks();
        const allPlaced = threadCosts.every((cost) => {
            const candidate = pRam.find((block) => block.ram >= cost);
            if (!candidate) return false;
            candidate.ram -= cost;
            return true;
        });
        if (!allPlaced) continue;

        // First greed value that fits wins; record it on the metrics object.
        metrics.greed = greed;
        metrics.threads = { hack: hThreads, weaken1: wThreads1, grow: gThreads, weaken2: wThreads2 };
        return true;
    }
    throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong.");
}
|
||||
23
local/path/home/S2tGrow.js
Normal file
23
local/path/home/S2tGrow.js
Normal file
@@ -0,0 +1,23 @@
|
||||
/*
|
||||
Very little has changed in the workers. We uncommented a couple of parts to allow for the ping-pong deployment.
|
||||
See the tWeaken.js for full comments.
|
||||
*/
|
||||
|
||||
/** @param {NS} ns */
export async function main(ns) {
    // The job descriptor arrives as a JSON blob on the command line.
    const job = JSON.parse(ns.args[0]);

    // How many ms we missed the scheduled start by (positive = late).
    const lateness = Date.now() - (job.end - job.time);
    if (lateness > 0) {
        ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${lateness}ms late. (${job.end})\n`);
        // Tell the controller how late we were so it can shift the rest of the batch.
        ns.writePort(ns.pid, lateness);
    } else {
        ns.writePort(ns.pid, 0);
    }
    // If we're early, stall inside the grow call until our scheduled finish lines up.
    await ns.grow(job.target, { additionalMsec: Math.max(-lateness, 0) });
    const end = Date.now();
    ns.atExit(() => {
        // if (job.report) ns.writePort(job.port, job.type + job.server);
        ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
    });
}
|
||||
23
local/path/home/S2tHack.js
Normal file
23
local/path/home/S2tHack.js
Normal file
@@ -0,0 +1,23 @@
|
||||
/*
|
||||
Very little has changed in the workers. We uncommented a couple of parts to allow for the ping-pong deployment.
|
||||
See the tWeaken.js for full comments.
|
||||
*/
|
||||
|
||||
/** @param {NS} ns */
export async function main(ns) {
    // The job descriptor arrives as a JSON blob on the command line.
    const job = JSON.parse(ns.args[0]);

    // How many ms we missed the scheduled start by (positive = late).
    const lateness = Date.now() - (job.end - job.time);
    if (lateness > 0) {
        ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${lateness}ms late. (${job.end})\n`);
        // Tell the controller how late we were so it can shift the rest of the batch.
        ns.writePort(ns.pid, lateness);
    } else {
        ns.writePort(ns.pid, 0);
    }
    // If we're early, stall inside the hack call until our scheduled finish lines up.
    await ns.hack(job.target, { additionalMsec: Math.max(-lateness, 0) });
    const end = Date.now();
    ns.atExit(() => {
        // if (job.report) ns.writePort(job.port, job.type + job.server);
        ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
    });
}
|
||||
27
local/path/home/S2tWeaken.js
Normal file
27
local/path/home/S2tWeaken.js
Normal file
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
Very little has changed in the workers. We uncommented a couple of parts to allow for the ping-pong deployment.
|
||||
See the tWeaken.js for full comments.
|
||||
*/
|
||||
|
||||
/** @param {NS} ns */
export async function main(ns) {
    // The job descriptor arrives as a JSON blob on the command line.
    const job = JSON.parse(ns.args[0]);

    // How many ms we missed the scheduled start by (positive = late).
    const lateness = Date.now() - (job.end - job.time);
    if (lateness > 0) {
        // We now write back to the controller if jobs are delayed so that it can adjust the other jobs to match.
        ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${lateness}ms late. (${job.end})\n`);
        ns.writePort(ns.pid, lateness);
    } else {
        ns.writePort(ns.pid, 0);
    }
    // If we're early, stall inside the weaken call until our scheduled finish lines up.
    await ns.weaken(job.target, { additionalMsec: Math.max(-lateness, 0) });
    const end = Date.now();

    // Write back to let the controller know that we're done. The actual data is currently only used by the prep function.
    ns.atExit(() => {
        if (job.report) ns.writePort(job.port, job.type + job.server);
        ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
    });
}
|
||||
218
local/path/home/S2utils.js
Normal file
218
local/path/home/S2utils.js
Normal file
@@ -0,0 +1,218 @@
|
||||
/*
|
||||
The utility function library. This is almost entirely unchanged from part 1, aside from the prep function
|
||||
printing to the log console instead of the terminal.
|
||||
*/
|
||||
|
||||
/** @param {NS} ns */
export async function main(ns) {
    // Running the library directly does nothing; it only exists to be imported.
    ns.tprint("This is just a function library, it doesn't do anything.");
}
|
||||
|
||||
// The recursive server navigation algorithm. The lambda predicate determines which servers to add to the final list.
|
||||
// You can also plug other functions into the lambda to perform other tasks that check all servers at the same time.
|
||||
/**
 * Depth-first walk of the whole server network starting at "home".
 * @param {NS} ns
 * @param {(hostname: string) => boolean} [lambdaCondition] - Predicate deciding which servers make the result list.
 * @param {string} [hostname="home"] - Current node; callers normally omit this.
 * @param {string[]} [servers] - Accumulator for matching hostnames.
 * @param {string[]} [visited] - Accumulator of already-seen hostnames.
 * @returns {string[]} Every reachable server that satisfies the predicate.
 */
export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) {
    if (visited.includes(hostname)) return;
    visited.push(hostname);
    if (lambdaCondition(hostname)) servers.push(hostname);
    const neighbors = ns.scan(hostname);
    // For any node but home, the first scan result is the parent we came from.
    if (hostname !== "home") neighbors.shift();
    neighbors.forEach((node) => getServers(ns, lambdaCondition, node, servers, visited));
    return servers;
}
|
||||
|
||||
// Here are a couple of my own getServers modules.
|
||||
// This one finds the best target for hacking. It tries to balance expected return with time taken.
|
||||
/**
 * Compares a candidate server against the current best hacking target.
 * Scores balance max money against security (or, with Formulas, against
 * weaken time and hack chance at minimum difficulty).
 * @param {NS} ns
 * @param {string} server - Candidate hostname.
 * @param {string} [target="n00dles"] - Current best target.
 * @param {boolean} [forms=false] - Whether Formulas.exe scoring is available.
 * @returns {string} Whichever of the two hosts scores higher.
 */
export function checkTarget(ns, server, target = "n00dles", forms = false) {
    if (!ns.hasRootAccess(server)) return target;
    const player = ns.getPlayer();
    const candidate = ns.getServer(server);
    const incumbent = ns.getServer(target);
    // Without Formulas, only consider servers at half our hacking level or below.
    const skillCap = player.skills.hacking / (forms ? 1 : 2);
    if (candidate.requiredHackingSkill > skillCap) return target;

    let previousScore;
    let currentScore;
    if (forms) {
        // Score both as if fully weakened: money per unit weaken time, discounted by hack chance.
        candidate.hackDifficulty = candidate.minDifficulty;
        incumbent.hackDifficulty = incumbent.minDifficulty;
        const score = (sim) =>
            sim.moneyMax / ns.formulas.hacking.weakenTime(sim, player) * ns.formulas.hacking.hackChance(sim, player);
        previousScore = score(incumbent);
        currentScore = score(candidate);
    } else {
        previousScore = incumbent.moneyMax / incumbent.minDifficulty;
        currentScore = candidate.moneyMax / candidate.minDifficulty;
    }
    return currentScore > previousScore ? server : target;
}
|
||||
|
||||
// A simple function for copying a list of scripts to a server.
|
||||
/**
 * Copies the given scripts to a server, skipping files already present
 * unless `overwrite` is set. Requires root access on the destination.
 * @param {NS} ns
 * @param {string} server - Destination hostname.
 * @param {string[]} scripts - Script filenames to copy from home.
 * @param {boolean} [overwrite=false] - Replace existing copies when true.
 */
export function copyScripts(ns, server, scripts, overwrite = false) {
    for (const script of scripts) {
        const missing = !ns.fileExists(script, server);
        if ((missing || overwrite) && ns.hasRootAccess(server)) {
            ns.scp(script, server);
        }
    }
}
|
||||
|
||||
// A generic function to check that a given server is prepped. Mostly just a convenience.
|
||||
/**
 * Checks whether a server is "prepped": money at maximum and security at minimum.
 * Security is compared with a small tolerance because repeated weakens can leave
 * floating-point residue; money is compared exactly since the game clamps it to max.
 * @param {NS} ns
 * @param {string} server - Hostname to check.
 * @returns {boolean} True when the server is ready to be batched.
 */
export function isPrepped(ns, server) {
    const tolerance = 0.0001;
    const maxMoney = ns.getServerMaxMoney(server);
    const money = ns.getServerMoneyAvailable(server);
    const minSec = ns.getServerMinSecurityLevel(server);
    const sec = ns.getServerSecurityLevel(server);
    const secFix = Math.abs(sec - minSec) < tolerance;
    // The expression is already boolean; the old `? true : false` ternary was redundant.
    return money === maxMoney && secFix;
}
|
||||
|
||||
/*
|
||||
This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it.
|
||||
I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway.
|
||||
The prep strategy uses a modified proto-batching technique, which will be covered in part 2.
|
||||
*/
|
||||
/**
 * Brings a target server to max money / min security using a modified
 * proto-batching strategy. Loops until isPrepped() passes: each pass plans one
 * "prep batch" of weaken/grow/weaken jobs sized to the available RAM, deploys
 * them, then blocks on this script's port until the final weaken reports back.
 * @param {NS} ns
 * @param {Metrics} values - Metrics for the target (money/sec snapshots, spacer, log port).
 * @param {RamNet} ramNet - RAM pool used to size and place jobs.
 * @returns {Promise<boolean>} Resolves true once the target is fully prepped.
 * @throws {Error} If a worker fails to launch (ns.exec returns 0).
 */
export async function prep(ns, values, ramNet) {
    const maxMoney = values.maxMoney;
    const minSec = values.minSec;
    let money = values.money;
    let sec = values.sec;
    while (!isPrepped(ns, values.target)) {
        const wTime = ns.getWeakenTime(values.target);
        const gTime = wTime * 0.8; // Grow time is a fixed 0.8x of weaken time.
        const dataPort = ns.getPortHandle(ns.pid);
        dataPort.clear();

        // Plan against a copy of the pool so the real RamNet stays untouched.
        const pRam = ramNet.cloneBlocks();
        const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75);
        const totalThreads = ramNet.prepThreads;
        let wThreads1 = 0;
        let wThreads2 = 0;
        let gThreads = 0;
        let batchCount = 1;
        let script, mode;
        /*
        Modes:
        0: Security only
        1: Money only
        2: One shot
        */

        if (money < maxMoney) {
            gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money));
            // Each weaken thread removes 0.05 security; offset the grows' increase.
            wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05);
        }
        if (sec > minSec) {
            // 20 weaken threads per point of excess security (1 thread = -0.05).
            wThreads1 = Math.ceil((sec - minSec) * 20);
            if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) {
                // Can't do it in one shot: drop the grow phase and chip at security over batches.
                gThreads = 0;
                wThreads2 = 0;
                batchCount = Math.ceil(wThreads1 / totalThreads);
                if (batchCount > 1) wThreads1 = totalThreads;
                mode = 0;
            } else mode = 2;
        } else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) {
            // Security is fine but the grow won't fit: split into money-only batches.
            // The 12.5:1 grow:weaken ratio keeps security neutral (12.5 * 0.004 = 0.05).
            mode = 1;
            const oldG = gThreads;
            wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1);
            gThreads = Math.floor(wThreads2 * 12.5);
            batchCount = Math.ceil(oldG / gThreads);
        } else mode = 2;

        // Big buffer here, since all the previous calculations can take a while. One second should be more than enough.
        const wEnd1 = Date.now() + wTime + 1000;
        const gEnd = wEnd1 + values.spacer;
        const wEnd2 = gEnd + values.spacer;

        // "metrics" here is basically a mock Job object. Again, this is just an artifact of repurposed old code.
        const metrics = {
            batch: "prep",
            target: values.target,
            type: "none",
            time: 0,
            end: 0,
            port: ns.pid,
            log: values.log,
            report: false
        };

        // Actually assigning threads. We actually allow grow threads to be spread out in mode 1.
        // This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher.
        // We're not trying to grow a specific amount, we're trying to grow as much as possible.
        for (const block of pRam) {
            while (block.ram >= 1.75) {
                const bMax = Math.floor(block.ram / 1.75)
                let threads = 0;
                if (wThreads1 > 0) {
                    script = "S2tWeaken.js";
                    metrics.type = "pWeaken1";
                    metrics.time = wTime;
                    metrics.end = wEnd1;
                    threads = Math.min(wThreads1, bMax);
                    // The very last weaken launched is the one that reports completion.
                    if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true;
                    wThreads1 -= threads;
                } else if (wThreads2 > 0) {
                    script = "S2tWeaken.js";
                    metrics.type = "pWeaken2";
                    metrics.time = wTime;
                    metrics.end = wEnd2;
                    threads = Math.min(wThreads2, bMax);
                    if (wThreads2 - threads === 0) metrics.report = true;
                    wThreads2 -= threads;
                } else if (gThreads > 0 && mode === 1) {
                    // Mode 1: grows may be split across blocks (see the note above this loop).
                    script = "S2tGrow.js";
                    metrics.type = "pGrow";
                    metrics.time = gTime;
                    metrics.end = gEnd;
                    threads = Math.min(gThreads, bMax);
                    metrics.report = false;
                    gThreads -= threads;
                } else if (gThreads > 0 && bMax >= gThreads) {
                    // Other modes: a grow only deploys if it fits whole in a single block.
                    script = "S2tGrow.js";
                    metrics.type = "pGrow";
                    metrics.time = gTime;
                    metrics.end = gEnd;
                    threads = gThreads;
                    metrics.report = false;
                    gThreads = 0;
                } else break;
                metrics.server = block.server;
                const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics));
                if (!pid) throw new Error("Unable to assign all jobs.");
                block.ram -= 1.75 * threads;
            }
        }

        // Fancy UI stuff to update you on progress.
        const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now();
        const timer = setInterval(() => {
            ns.clearLog();
            switch (mode) {
                case 0:
                    ns.print(`Weakening security on ${values.target}...`);
                    break;
                case 1:
                    ns.print(`Maximizing money on ${values.target}...`);
                    break;
                case 2:
                    ns.print(`Finalizing preparation on ${values.target}...`);
            }
            ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`);
            ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`);
            const time = tEnd - Date.now();
            ns.print(`Estimated time remaining: ${ns.tFormat(time)}`);
            ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`);
        }, 200);
        ns.atExit(() => clearInterval(timer));

        // Wait for the last weaken to finish (its report string starts with "pWeaken").
        do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken"));
        clearInterval(timer);
        await ns.sleep(100);

        // Refresh state for the next pass of the while loop.
        money = ns.getServerMoneyAvailable(values.target);
        sec = ns.getServerSecurityLevel(values.target);
    }
    return true;
}
|
||||
491
local/path/home/S4controller.js
Normal file
491
local/path/home/S4controller.js
Normal file
@@ -0,0 +1,491 @@
|
||||
/*
|
||||
Welcome to part 4. A continuous batcher is a major hurdle compared to everything we've done so far. The number
|
||||
and complexity of the challenges increases drastically when trying to keep everything running indefinitely.
|
||||
With luck, the overengineering we've done so far will have well prepared us for the challenges of a periodic
|
||||
batcher.
|
||||
|
||||
Technically, I use quite a few JIT techniques in this batcher, but I don't consider it a true JIT batcher
|
||||
as it doesn't take full advantage of the potential RAM efficiency. Instead, I favor simpler logic, while still
|
||||
allowing the batcher to make certain adjustments if it needs to.
|
||||
|
||||
When it comes to continuous batchers, performance is king. We're going to aim for 5ms spacing as we have
|
||||
throughout this guide so far, but there's a lot we need to do in those 5ms. As such, we need to make sure that
|
||||
we choose which operations to do carefully, as well as when to do them and how to make sure they are as fast
|
||||
as we can make them.
|
||||
*/
|
||||
|
||||
// One new utility. A custom data structure for managing our schedule. You can see the details in utils.js
|
||||
import { getServers, copyScripts, checkTarget, isPrepped, prep, Deque } from "/S4utils.js";
|
||||
|
||||
const TYPES = ["hack", "weaken1", "grow", "weaken2"];
|
||||
const WORKERS = ["S4tHack.js", "S4tWeaken.js", "S4tGrow.js"];
|
||||
const SCRIPTS = { hack: "S4tHack.js", weaken1: "S4tWeaken.js", grow: "S4tGrow.js", weaken2: "S4tWeaken.js" };
|
||||
const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 };
|
||||
// const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 };
|
||||
|
||||
// A new optional constant. The RamNet will attempt to reserve this much ram at home.
|
||||
// You can set it to 0 if you don't want to reserve anything, and setting it too high will just reserve as much as possible.
|
||||
const RESERVED_HOME_RAM = 0;
|
||||
|
||||
// A brand new class to help keep our increasingly complex logic organized.
|
||||
// Runs HWGW batches against one target indefinitely, replacing each batch as
// its final weaken reports in. Self-corrects on desync by cancelling hacks.
class ContinuousBatcher {
    #ns; // The ns object. Stored as a class variable to save me the trouble of passing it all the time.

    // The usual data we've grown familiar with by now.
    #metrics;   // Metrics instance for the target.
    #ramNet;    // RamNet pool jobs are placed into.
    #target;    // Hostname being batched.
    #schedule;  // Deque of planned-but-not-yet-deployed jobs.
    #dataPort;  // Port workers report back on (keyed to our pid).
    #batchCount = 0;
    #desyncs = 0; // This is mostly used for logging purposes, since the batcher is self-correcting.

    // A capital M Map. We'll use this to keep track of active jobs, keyed by job.id.
    #running = new Map();

    /**
     * @param {NS} ns
     * @param {Metrics} metrics - Precalculated batch parameters for the target.
     * @param {RamNet} ramNet - RAM pool for job placement.
     */
    constructor(ns, metrics, ramNet) {
        this.#ns = ns;
        this.#metrics = metrics;
        this.#ramNet = ramNet;
        this.#target = metrics.target;
        this.#dataPort = ns.getPortHandle(ns.pid);

        // Seeding the first ending time.
        this.#metrics.end = Date.now() + metrics.wTime - metrics.spacer;

        // The new schedule I promised. It's a double-ended queue, but we'll mostly just be using it as a normal queue.
        // It has a static size, so we make sure it can accomodate all of our jobs (4 per batch).
        this.#schedule = new Deque(metrics.depth * 4);
    }

    // This is a function that can schedule a given number of batches.
    // With no arguments, it just fills up the queue.
    scheduleBatches(batches = this.#metrics.depth) {
        while (this.#schedule.size < batches * 4) {
            ++this.#batchCount;
            for (const type of TYPES) {
                // Each job ends one spacer after the previous one.
                this.#metrics.end += this.#metrics.spacer;
                const job = new Job(type, this.#metrics, this.#batchCount);

                /*
                We don't actually error out if a job can't be assigned anymore. Instead, we just assign as much
                as we can. If it desyncs, the logic will correct it, and if a weaken2 gets cancelled then the actual
                depth will naturally decrease below the target depth. Not a perfect fix, but better than breaking.
                */
                if (!this.#ramNet.assign(job)) {
                    this.#ns.tprint(`WARN: Insufficient RAM to assign ${job.type}: ${job.batch}.`);
                    continue;
                }
                this.#schedule.push(job);
            }
        }
    }

    // The function for deploying jobs. Very similar to the code from our shotgun batcher with some minor changes.
    async deploy() {
        // The for loop is replaced by a while loop, since our Deque isn't iterable.
        while (!this.#schedule.isEmpty()) {
            const job = this.#schedule.shift();
            job.end += this.#metrics.delay;
            const jobPid = this.#ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job));
            if (!jobPid) throw new Error(`Unable to deploy ${job.type}`);
            const tPort = this.#ns.getPortHandle(jobPid);

            // We save the pid for later (needed to kill the job on desync).
            job.pid = jobPid;
            await tPort.nextWrite();

            // Jobs can be late as long as the delay won't cause collisions (only lateness beyond one spacer accumulates).
            this.#metrics.delay += Math.max(Math.ceil(tPort.read()) - this.#metrics.spacer, 0);
            this.#running.set(job.id, job);
        }

        // After the loop, we adjust future job ends to account for the delay, then discard it.
        this.#metrics.end += this.#metrics.delay;
        this.#metrics.delay = 0;
    }

    // Our old timeout function is now a proper function of its own. A few extra baubles in the log, but nothing exciting.
    log() {
        const ns = this.#ns;
        const metrics = this.#metrics;
        const ramNet = this.#ramNet;
        ns.clearLog();
        ns.print(`Hacking ~\$${ns.formatNumber(metrics.maxMoney * metrics.greed * metrics.chance / (4 * metrics.spacer) * 1000)}/s from ${metrics.target}`);
        ns.print(`Status: ${isPrepped(ns, this.#target) ? "Prepped" : "Desynced"}`);
        // NOTE(review): `minSec - sec` is <= 0 whenever security is raised; the sign may be inverted — verify intended output.
        ns.print(`Security: +${metrics.minSec - metrics.sec}`);
        ns.print(`Money: \$${ns.formatNumber(metrics.money, 2)}/${ns.formatNumber(metrics.maxMoney, 2)}`);
        ns.print(`Greed: ${Math.floor(metrics.greed * 1000) / 10}%`);
        ns.print(`Ram available: ${ns.formatRam(ramNet.totalRam)}/${ns.formatRam(ramNet.maxRam)}`);
        ns.print(`Active jobs: ${this.#running.size}/${metrics.depth * 4}`);

        // You'll see what this line's about in a moment.
        if (this.#desyncs) ns.print(`Hacks cancelled by desync: ${this.#desyncs}`);
    }

    // The core loop of our batcher logic. Quite lean with everything neatly divided into functions, but there's still
    // plenty going on here.
    async run() {
        // First we do some initial setup, this is essentially firing off a shotgun blast to get us started.
        const dataPort = this.#dataPort;
        this.scheduleBatches();
        await this.deploy();
        await this.#ns.sleep(0); // This is probably pointless. I forget why I put it here.
        this.log();
        while (true) {
            // Wait for the nextWrite, as usual.
            await dataPort.nextWrite();

            // Sometimes there's a delay and more than one job writes to the port at once.
            // We make sure to handle it all before we move on.
            while (!dataPort.empty()) {
                // Workers now report unique identifiers (type + batchnumber) used to find them on the map.
                const data = dataPort.read();

                // Free up the ram, them remove them from the active list.
                // The check handles a corner case where a hack gets "cancelled" after it's already finished.
                if (this.#running.has(data)) {
                    this.#ramNet.finish(this.#running.get(data));
                    this.#running.delete(data);
                }

                // If it's a W2, we've got an opening to do some work.
                if (data.startsWith("weaken2")) {
                    // Recalculate times. Threads too, but only if prepped (the logic is in the function itself).
                    this.#metrics.calculate(this.#ns);

                    /*
                    This is probably the most JIT-like aspect of the entire batcher. If the server isn't prepped, then
                    we cancel the next hack to let the server fix itself. Between this and the extra 1% grow threads, level
                    ups are completely handled. Rapid level ups can lead to a lot of lost jobs, but eventually the program
                    stabilizes.

                    There are probably more efficient ways to do this. Heck, even this solution could be optimized better,
                    but for now, this is an adequate demonstration of a reasonable non-formulas solution to the level up
                    problem. It also lets us dip our toes into JIT logic in preparation for the final part.
                    */
                    if (!isPrepped(this.#ns, this.#target)) {
                        // "weaken2" is 7 chars; the remainder of `data` is the batch number. Cancel the NEXT batch's hack.
                        const id = "hack" + (parseInt(data.slice(7)) + 1);
                        const cancel = this.#running.get(id);
                        // Just in case the hack was already aborted somehow.
                        if (cancel) {
                            this.#ramNet.finish(cancel);
                            this.#ns.kill(cancel.pid);
                            this.#running.delete(id);
                            ++this.#desyncs; // Just to keep track of how much we've lost keeping things prepped.
                        }
                    }

                    // Then of course we just schedule and deploy a new batch.
                    this.scheduleBatches(1);
                    await this.deploy();
                    this.log();
                }
            }
        }
    }
}
|
||||
|
||||
/*
|
||||
Our poor "main" function isn't much more than a kickstart for our new batcher object. It's a bit weird having
|
||||
it wedged between objects like this, but I wanted to have the new functionality up at the top since most of the
|
||||
remaining code hasn't changed much. I'll comment the changes anyway.
|
||||
*/
|
||||
/**
 * Entry point: scans the network, preps the chosen target, optimizes batch
 * parameters, then hands control to a ContinuousBatcher until it exits.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.disableLog("ALL");
    ns.tail();

    /*
    This commented out code is for a debugging tool that centralizes logs from the worker scripts into one place.
    It's main advantage is the ability to write txt logs to file, which can be perused later to track down errors.
    You can uncomment it if you'd like to see a live stream of workers finishing without flooding the terminal.

    If you do, make sure to search the file for -LOGGING and uncomment all relevant lines.
    */
    // if (ns.isRunning("S4logHelper.js", "home")) ns.kill("S4logHelper.js", "home");
    // const logPort = ns.exec("S4logHelper.js", "home");
    // ns.atExit(() => ns.closeTail(logPort));

    // Setup is mostly the same.
    const dataPort = ns.getPortHandle(ns.pid);
    dataPort.clear();
    // Optional first argument overrides automatic target selection.
    let target = ns.args[0] ? ns.args[0] : "n00dles";
    while (true) {
        // One network walk does triple duty: picks a target, pushes worker scripts, and collects rooted servers.
        const servers = getServers(ns, (server) => {
            if (!ns.args[0]) target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home"));
            copyScripts(ns, server, WORKERS, true);
            return ns.hasRootAccess(server);
        });
        const ramNet = new RamNet(ns, servers);
        const metrics = new Metrics(ns, target);
        // metrics.log = logPort; // Uncomment for -LOGGING.
        if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet);
        ns.clearLog();
        ns.print("Optimizing. This may take a few seconds...");

        // Optimizer has changed again. Back to being synchronous, since the performance is much better.
        // NOTE(review): optimizePeriodic is not in the visible import list — presumably defined later in this file; verify.
        optimizePeriodic(ns, metrics, ramNet);
        metrics.calculate(ns);

        // Create and run our batcher.
        const batcher = new ContinuousBatcher(ns, metrics, ramNet);
        await batcher.run();

        /*
        You might be wondering why I put this in a while loop and then just return here. The simple answer is that
        it's because this is meant to be run in a loop, but I didn't implement the logic for it. This version of the
        batcher is completely static once created. It sticks to a single greed value, and doesn't update if more
        RAM becomes available.

        In a future version, you'd want some logic to allow the batcher to choose new targets, update its available RAM,
        and create new batchers during runtime. For now, that's outside the scope of this guide, but consider this loop
        as a sign of what could be.
        */
        return;
    }
}
|
||||
|
||||
// The Job class, lean as it is, remains mostly unchanged. I got rid of the server argument since I wasn't using it
|
||||
// and added a batch number instead.
|
||||
// A single worker task. Compared to the part-2 Job, the server argument is gone
// and a batch number plus map/deque bookkeeping fields were added.
class Job {
    /**
     * @param {string} type - One of "hack", "weaken1", "grow", "weaken2".
     * @param {Metrics} metrics - Shared batch parameters; `end` has already been advanced for this job.
     * @param {number} batch - Sequence number of the batch this job belongs to.
     */
    constructor(type, metrics, batch) {
        const { end, times, target, threads, port } = metrics;
        this.type = type;
        this.target = target;
        this.port = port;
        // this.end = metrics.ends[type];
        this.end = end;
        this.time = times[type];
        this.threads = threads[type];
        this.cost = this.threads * COSTS[type];
        this.server = "none"; // Filled in by RamNet.assign().
        this.report = true;
        this.batch = batch;

        // The future is now. The status and id are used for interacting with the Deque and Map in our batcher class.
        this.status = "active";
        this.id = type + batch;
        // this.log = metrics.log; // -LOGGING
    }
}
|
||||
|
||||
// The only change to the metrics class is the calculate function skipping threadcounts if the server isn't prepped.
|
||||
/**
 * Snapshot of batching parameters for a single hacking target.
 * `calculate` refreshes timings every call, but only recomputes thread
 * counts when the server is prepped (the hack/grow math assumes min
 * security and max money).
 */
class Metrics {
	/**
	 * @param {NS} ns - Netscript API handle.
	 * @param {string} server - Hostname of the hacking target.
	 */
	constructor(ns, server) {
		this.target = server;
		this.maxMoney = ns.getServerMaxMoney(server);
		// Clamp to 1 so later maxMoney/money ratios never divide by zero.
		this.money = Math.max(ns.getServerMoneyAvailable(server), 1);
		this.minSec = ns.getServerMinSecurityLevel(server);
		this.sec = ns.getServerSecurityLevel(server);
		this.prepped = isPrepped(ns, server);
		this.chance = 0; // Hack success chance, filled in by calculate().
		this.wTime = 0; // Weaken duration, filled in by calculate().
		this.delay = 0;
		this.spacer = 5; // Milliseconds between job landings.
		this.greed = 0.01; // Fraction of max money to steal per batch.
		this.depth = 0; // The number of concurrent batches to run. Set by the optimizer.

		this.times = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.end = 0; // Slight change for the new timing. The old way is commented out in case I switch back later.
		// this.ends = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.threads = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };

		// Workers report back on a port keyed to this controller's pid.
		this.port = ns.pid;
	}

	/**
	 * Refresh money/security/timings, and (when prepped) thread counts.
	 * @param {NS} ns
	 * @param {number} [greed=this.greed] - Fraction of max money to hack.
	 */
	calculate(ns, greed = this.greed) {
		const server = this.target;
		const maxMoney = this.maxMoney;
		this.money = ns.getServerMoneyAvailable(server);
		this.sec = ns.getServerSecurityLevel(server);
		this.wTime = ns.getWeakenTime(server);
		this.times.weaken1 = this.wTime;
		this.times.weaken2 = this.wTime;
		// Hack and grow durations are fixed fractions of weaken time (1/4 and 4/5).
		this.times.hack = this.wTime / 4;
		this.times.grow = this.wTime * 0.8;
		// this.depth = this.wTime / this.spacer * 4;

		if (isPrepped(ns, server)) { // The only change.
			const hPercent = ns.hackAnalyze(server);
			const amount = maxMoney * greed;
			const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(server, amount)), 1);
			const tGreed = hPercent * hThreads;
			// 1% overshoot on grow threads to absorb rounding error.
			const gThreads = Math.ceil(ns.growthAnalyze(server, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
			// 0.05 is weaken-per-thread; 0.002/0.004 look like per-thread hack/grow
			// security gains — NOTE(review): confirm against the game's constants.
			this.threads.weaken1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
			this.threads.weaken2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
			this.threads.hack = hThreads;
			this.threads.grow = gThreads;
			this.chance = ns.hackAnalyzeChance(server);
		}
	}
}
|
||||
|
||||
// A few minor edits here. An unused "simulation" mode, and reserved RAM on home.
|
||||
/**
 * Tracks free RAM across rooted servers as a sorted list of blocks and
 * hands RAM out to jobs (assign) / reclaims it when they finish (finish).
 */
class RamNet {
	#blocks = []; // { server, ram } entries, sorted smallest-first with home last.
	#minBlockSize = Infinity; // Smallest free block at construction (no public getter).
	#maxBlockSize = 0; // Largest free block at construction.
	#totalRam = 0; // Sum of currently-free RAM across all blocks.
	#prepThreads = 0; // How many 1.75-cost worker threads fit in total.
	#maxRam = 0; // Sum of max RAM (free + used) across all blocks.
	#index = new Map(); // server name -> position in #blocks.

	// Simulate mode ignores running scripts. Can be used to make calculations while the batcher is operating.
	/**
	 * @param {NS} ns
	 * @param {string[]} servers - Hostnames to consider; only rooted ones are kept.
	 * @param {boolean} [simulate=false] - Treat all servers as if nothing is running.
	 */
	constructor(ns, servers, simulate = false) {
		for (const server of servers) {
			if (ns.hasRootAccess(server)) {
				const maxRam = ns.getServerMaxRam(server);
				// Save some extra ram on home. Clamp used ram to maxRam to prevent negative numbers.
				const reserved = (server === "home") ? RESERVED_HOME_RAM : 0;
				const used = Math.min((simulate ? 0 : ns.getServerUsedRam(server)) + reserved, maxRam);
				const ram = maxRam - used;
				if (maxRam > 0) {
					const block = { server: server, ram: ram };
					this.#blocks.push(block);
					if (ram < this.#minBlockSize) this.#minBlockSize = ram;
					if (ram > this.#maxBlockSize) this.#maxBlockSize = ram;
					this.#totalRam += ram;
					this.#maxRam += maxRam;
					this.#prepThreads += Math.floor(ram / 1.75);
				}
			}
		}
		this.#sort();
		// Remember each server's slot so getBlock() is an O(1) lookup.
		this.#blocks.forEach((block, index) => this.#index.set(block.server, index));
	}

	// Smallest blocks first so assign() fills up scraps before touching big
	// servers; home is always pushed to the very end.
	#sort() {
		this.#blocks.sort((x, y) => {
			if (x.server === "home") return 1;
			if (y.server === "home") return -1;

			return x.ram - y.ram;
		});
	}

	get totalRam() {
		return this.#totalRam;
	}

	get maxRam() {
		return this.#maxRam;
	}

	get maxBlockSize() {
		return this.#maxBlockSize;
	}

	get prepThreads() {
		return this.#prepThreads;
	}

	// Look up a block by server name; throws if the server wasn't registered.
	getBlock(server) {
		if (this.#index.has(server)) {
			return this.#blocks[this.#index.get(server)];
		} else {
			throw new Error(`Server ${server} not found in RamNet.`);
		}
	}

	// Reserve RAM for a job on the first (smallest) block that fits.
	// Mutates job.server on success; returns false when nothing fits.
	assign(job) {
		const block = this.#blocks.find(block => block.ram >= job.cost);
		if (block) {
			job.server = block.server;
			block.ram -= job.cost;
			this.#totalRam -= job.cost;
			return true;
		} else return false;
	}

	// Return a finished job's RAM to the block it was assigned to.
	finish(job) {
		const block = this.getBlock(job.server);
		block.ram += job.cost;
		this.#totalRam += job.cost;
	}

	// Shallow copies of all blocks — safe to mutate for planning.
	cloneBlocks() {
		return this.#blocks.map(block => ({ ...block }));
	}

	// Debug helper: dump every block to the script log.
	printBlocks(ns) {
		for (const block of this.#blocks) ns.print(block);
	}

	// Count how many complete batches (one job per entry in threadCosts) fit
	// into the current free RAM, without reserving anything for real.
	testThreads(threadCosts) {
		const pRam = this.cloneBlocks();
		let batches = 0;
		let found = true;
		while (found) {
			for (const cost of threadCosts) {
				found = false;
				const block = pRam.find(block => block.ram >= cost);
				if (block) {
					block.ram -= cost;
					found = true;
				} else break; // One job didn't fit — this batch is incomplete.
			}
			if (found) batches++;
		}
		return batches;
	}
}
|
||||
|
||||
// Quite a bit has changed in this one. It's back to being synchronous, though it can still take a while.
|
||||
/**
 * Search for the largest (greed, spacer, depth) configuration that fits into
 * the network's available RAM. Greed is walked downward from 0.95; when it
 * bottoms out, the spacer is widened (lowering the required depth) and the
 * greed search restarts. Writes the winning values into `metrics` in place.
 * @param {NS} ns
 * @param {Metrics} metrics - Receives spacer/greed/depth on success.
 * @param {RamNet} ramNet
 * @throws {Error} When not even one batch fits into RAM.
 */
function optimizePeriodic(ns, metrics, ramNet) {
	const maxThreads = ramNet.maxBlockSize / 1.75;
	const maxMoney = metrics.maxMoney;
	const hPercent = ns.hackAnalyze(metrics.target);
	const wTime = ns.getWeakenTime(metrics.target);

	const minGreed = 0.001;
	const maxSpacer = wTime; // This is more of an infinite loop safety net than anything.
	const stepValue = 0.01;
	let greed = 0.95; // Capping greed a bit lower. I don't have a compelling reason for this.
	let spacer = metrics.spacer; // We'll be adjusting the spacer in low ram conditions to allow smaller depths.

	while (greed > minGreed && spacer < maxSpacer) {
		// We calculate a max depth based on the spacer, then add one as a buffer.
		const depth = Math.ceil(wTime / (4 * spacer)) + 1;
		const amount = maxMoney * greed;
		const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(metrics.target, amount)), 1);
		const tGreed = hPercent * hThreads;
		// 1% overshoot on grow threads to absorb rounding error.
		const gThreads = Math.ceil(ns.growthAnalyze(metrics.target, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
		if (Math.max(hThreads, gThreads) <= maxThreads) {
			const wThreads1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
			const wThreads2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);

			// Per-job RAM costs (1.7/1.75 appear to be the worker script
			// footprints — NOTE(review): confirm against the worker files).
			const threadCosts = [hThreads * 1.7, wThreads1 * 1.75, gThreads * 1.75, wThreads2 * 1.75];

			// Glad I kept these, they turned out to be useful after all. When trying to hit target depth,
			// checking that there's actually enough theoretical ram to fit them is a massive boost to performance.
			const totalCost = threadCosts.reduce((t, c) => t + c) * depth;
			if (totalCost < ramNet.totalRam) {
				// Double check that we can actually fit our threads into ram, then set our metrics and return.
				const batchCount = ramNet.testThreads(threadCosts);
				if (batchCount >= depth) {
					metrics.spacer = spacer;
					metrics.greed = greed;
					metrics.depth = depth;
					return
				}
			}
		}
		// await ns.sleep(0); // Uncomment and make the function async if you don't like the freeze on startup.

		// Decrement greed until we hit the minimum, then reset and increment spacer. We'll find a valid configuration eventually.
		// NOTE(review): the reset value (0.99) exceeds the 0.95 starting cap above —
		// looks like a leftover from an earlier cap; confirm which was intended.
		greed -= stepValue;
		if (greed < minGreed && spacer < maxSpacer) {
			greed = 0.99;
			++spacer;
		}
	}
	throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong.");
}
|
||||
42
local/path/home/S4logHelper.js
Normal file
42
local/path/home/S4logHelper.js
Normal file
@@ -0,0 +1,42 @@
|
||||
/*
|
||||
This script is completely unchanged from the last part. As a note, if you find that saves are taking a very long time
|
||||
it may help to disable txt logging when you aren't actively debugging. The log files generated by this script
|
||||
are quite big even when it's erasing the data on each new instance.
|
||||
*/
|
||||
|
||||
/**
 * Log sink: opens a tail window, then forwards everything written to this
 * script's port (keyed by its own pid) into that window. The commented-out
 * lines implement desync statistics and txt-file logging.
 * @param {NS} ns
 */
export async function main(ns) {

	const logFile = "S4log.txt";
	ns.clear(logFile); // Clear the previous log for each instance.
	ns.disableLog("ALL");
	ns.tail();
	ns.moveTail(200, 200); // Move it out of the way so it doesn't cover up the controller.
	const logPort = ns.getPortHandle(ns.pid);
	logPort.clear();

	// Pretty simple. Just wait until something writes to the log and save the info.
	// Writes to its own console as well as a text file.
	// These counters are only used by the commented-out desync statistics below.
	let max = 0;
	let count = 0;
	let total = 0;
	let errors = 0;
	while (true) {
		await logPort.nextWrite();
		// Drain the port completely before sleeping again.
		do {
			const data = logPort.read();
			// if (data > max) max = data;
			// if (data > 5) ++errors;
			// total += data;
			// ++count;
			// ns.clearLog();
			// ns.print(`Max desync: ${max}`);
			// ns.print(`Average desync: ${total / count}`);
			// ns.print(`Errors: ${errors}`);

			// if (data.startsWith("WARN")) ns.print(data);
			ns.print(data);
			// ns.write(logFile, data); // Comment this line out to disable txt logging.
		} while (!logPort.empty());
	}
}
|
||||
30
local/path/home/S4tGrow.js
Normal file
30
local/path/home/S4tGrow.js
Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations
|
||||
to be as perfect as I can get them. Full comments in weaken.js as usual.
|
||||
*/
|
||||
|
||||
/**
 * Worker: grow job.target so that the grow lands at job.end.
 * Expects ns.args[0] to be a JSON-serialized Job. Reports its observed
 * startup delay on its own port, and its completion on job.port at exit.
 * See S4tWeaken.js for the fully-commented version of this timing scheme.
 * @param {NS} ns
 */
export async function main(ns) {
	const start = performance.now();
	const port = ns.getPortHandle(ns.pid); // Grabbed early — ns is tied up once the grow promise is live.
	const job = JSON.parse(ns.args[0]);
	let tDelay = 0;
	let delay = job.end - job.time - Date.now();
	if (delay < 0) {
		// We missed the launch window: run immediately and record how late we were.
		ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
		tDelay = -delay
		delay = 0;
	}
	// Start the grow, then report startup overhead before awaiting it.
	const promise = ns.grow(job.target, { additionalMsec: delay });
	tDelay += performance.now() - start;
	port.write(tDelay);
	await promise;

	ns.atExit(() => {
		const end = Date.now();
		if (job.report) ns.writePort(job.port, job.type + job.batch);
		// Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well.
		// ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
		// ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`);
	});
}
|
||||
30
local/path/home/S4tHack.js
Normal file
30
local/path/home/S4tHack.js
Normal file
@@ -0,0 +1,30 @@
|
||||
/*
|
||||
A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations
|
||||
to be as perfect as I can get them. Full comments in weaken.js as usual.
|
||||
*/
|
||||
|
||||
/**
 * Worker: hack job.target so that the hack lands at job.end.
 * Expects ns.args[0] to be a JSON-serialized Job. Reports its observed
 * startup delay on its own port, and its completion on job.port at exit.
 * See S4tWeaken.js for the fully-commented version of this timing scheme.
 * @param {NS} ns
 */
export async function main(ns) {
	const start = performance.now();
	const port = ns.getPortHandle(ns.pid); // Grabbed early — ns is tied up once the hack promise is live.
	const job = JSON.parse(ns.args[0]);
	let tDelay = 0;
	let delay = job.end - job.time - Date.now();
	if (delay < 0) {
		// We missed the launch window: run immediately and record how late we were.
		ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
		tDelay = -delay
		delay = 0;
	}
	// Start the hack, then report startup overhead before awaiting it.
	const promise = ns.hack(job.target, { additionalMsec: delay });
	tDelay += performance.now() - start;
	port.write(tDelay);
	await promise;

	ns.atExit(() => {
		const end = Date.now();
		if (job.report) ns.writePort(job.port, job.type + job.batch);
		// Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well.
		// ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
		// ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`);
	});
}
|
||||
41
local/path/home/S4tWeaken.js
Normal file
41
local/path/home/S4tWeaken.js
Normal file
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations
|
||||
to be as perfect as I can get them. Full comments in weaken.js as usual.
|
||||
*/
|
||||
|
||||
/**
 * Worker: weaken job.target so that the weaken lands at job.end.
 * Expects ns.args[0] to be a JSON-serialized Job. Reports its observed
 * startup delay on its own port, and its completion on job.port at exit.
 * @param {NS} ns
 */
export async function main(ns) {
	const start = performance.now();
	const port = ns.getPortHandle(ns.pid); // We have to define this here. You'll see why in a moment.
	const job = JSON.parse(ns.args[0]);
	let tDelay = 0;
	let delay = job.end - job.time - Date.now();

	// Don't report delay right away.
	if (delay < 0) {
		ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
		tDelay = -delay
		delay = 0;
	}

	// The actual function call can take some time, so instead of awaiting on it right away, we save the promise for later.
	const promise = ns.weaken(job.target, { additionalMsec: delay });

	// Then after calling the hack function, we calculate our final delay and report it to the controller.
	tDelay += performance.now() - start;

	// The ns object is tied up by the promise, so invoking it now would cause a concurrency error.
	// That's why we fetched this handle earlier.
	port.write(tDelay);

	// Then we finally await the promise. This should give millisecond-accurate predictions for the end time of a job.
	await promise;

	ns.atExit(() => {
		const end = Date.now();
		if (job.report) ns.writePort(job.port, job.type + job.batch);
		// Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well.
		// ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
		// ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`);
	});
}
|
||||
420
local/path/home/S4utils.js
Normal file
420
local/path/home/S4utils.js
Normal file
@@ -0,0 +1,420 @@
|
||||
/*
|
||||
We've got a brand new class to look at, but the rest of the file remains unchanged.
|
||||
*/
|
||||
|
||||
/**
 * Entry point stub — this file is a function library meant to be imported,
 * not executed directly.
 * @param {NS} ns
 */
export async function main(ns) {
	ns.tprint("This is just a function library, it doesn't do anything.");
}
|
||||
|
||||
/*
|
||||
This is an overengineered abomination of a custom data structure. It is essentially a double-ended queue,
|
||||
but also has a Map stapled to it, just in case we need to access items by id (we don't.)
|
||||
|
||||
The idea is that it can fetch/peek items from the front or back with O(1) timing. This gets around the issue of
|
||||
dynamic arrays taking O(n) time to shift, which is terrible behavior for very long queues like the one we're using.
|
||||
*/
|
||||
/**
 * Double-ended queue over a fixed-size circular buffer, with an id→slot Map
 * bolted on for O(1) membership checks and lookups. Items removed via
 * delete() are merely tombstoned ("deleted" status) in place and are swept
 * lazily by pop/shift/peek — so avoid delete() in hot paths: tombstones
 * occupy capacity and break the O(1) guarantees until swept.
 */
export class Deque {
	#cap = 0; // Fixed maximum number of slots.
	#count = 0; // Occupied slots (tombstones included).
	#head = 0; // Read position for shift()/peekFront().
	#tombstones = 0; // Items marked "deleted" but not yet swept out.
	#buffer; // Backing storage array.
	#lookup = new Map(); // id -> slot index, for exists()/get().

	/** @param {number} capacity - Fixed size of the backing array. */
	constructor(capacity) {
		this.#cap = capacity;
		this.#buffer = new Array(capacity);
	}

	/**
	 * Build a Deque from an array, optionally leaving extra free slots.
	 * @param {Array} array
	 * @param {number} [overallocation=0]
	 */
	static fromArray(array, overallocation = 0) {
		const deque = new Deque(array.length + overallocation);
		for (const item of array) deque.push(item);
		return deque;
	}

	// Live item count — tombstones are excluded.
	get size() {
		return this.#count - this.#tombstones;
	}

	isEmpty() {
		return this.size === 0;
	}

	// Tombstones still occupy slots, so a "full" deque can have size < capacity.
	isFull() {
		return this.#count === this.#cap;
	}

	// Write position: the first empty slot past the last occupied one.
	get #tail() {
		return (this.#head + this.#count) % this.#cap;
	}

	/** Append an item at the back. Throws when at capacity. */
	push(value) {
		if (this.isFull()) {
			throw new Error("The deque is full. You cannot add more items.");
		}
		const slot = this.#tail;
		this.#buffer[slot] = value;
		this.#lookup.set(value.id, slot);
		this.#count += 1;
	}

	/** Remove and return the item at the back, sweeping tombstones on the way. */
	pop() {
		while (!this.isEmpty()) {
			this.#count -= 1;
			const slot = (this.#head + this.#count) % this.#cap;
			const item = this.#buffer[slot];
			this.#buffer[slot] = undefined; // Release the slot for GC.
			this.#lookup.delete(item.id);
			if (item.status === "deleted") {
				this.#tombstones -= 1; // Swept a dead item — keep looking.
			} else {
				return item;
			}
		}
		throw new Error("The deque is empty. You cannot delete any items.");
	}

	/** Remove and return the item at the front, sweeping tombstones on the way. */
	shift() {
		while (!this.isEmpty()) {
			// The head pointer already marks the front — no array shifting needed.
			const item = this.#buffer[this.#head];
			this.#buffer[this.#head] = undefined;
			this.#lookup.delete(item.id);
			this.#head = (this.#head + 1) % this.#cap; // Circular advance.
			this.#count -= 1;
			if (item.status === "deleted") {
				this.#tombstones -= 1;
			} else {
				return item;
			}
		}
		throw new Error("The deque is empty. You cannot delete any items.");
	}

	/** Insert an item at the front. Throws when at capacity. */
	unshift(value) {
		if (this.isFull()) {
			throw new Error("The deque is full. You cannot add more items.");
		}
		this.#head = (this.#head - 1 + this.#cap) % this.#cap;
		this.#buffer[this.#head] = value;
		this.#lookup.set(value.id, this.#head);
		this.#count += 1;
	}

	/** Return (without removing) the front item, sweeping tombstones first. */
	peekFront() {
		if (this.isEmpty()) {
			throw new Error("The deque is empty. You cannot peek.");
		}

		while (this.#buffer[this.#head].status === "deleted") {
			this.#lookup.delete(this.#buffer[this.#head]?.id);
			this.#buffer[this.#head] = undefined;
			this.#head = (this.#head + 1) % this.#cap;
			this.#tombstones -= 1;
			this.#count -= 1;

			if (this.isEmpty()) {
				throw new Error("The deque is empty. You cannot peek.");
			}
		}
		return this.#buffer[this.#head];
	}

	/**
	 * Return (without removing) the back item, sweeping tombstones first.
	 * Marginally slower than peekFront since the back index is recomputed.
	 */
	peekBack() {
		if (this.isEmpty()) {
			throw new Error("The deque is empty. You cannot peek.");
		}

		let slot = (this.#head + this.#count - 1) % this.#cap;
		while (this.#buffer[slot].status === "deleted") {
			this.#lookup.delete(this.#buffer[slot].id);
			this.#buffer[slot] = undefined;
			slot = (slot - 1 + this.#cap) % this.#cap;
			this.#tombstones -= 1;
			this.#count -= 1;

			if (this.isEmpty()) {
				throw new Error("The deque is empty. You cannot peek.");
			}
		}

		return this.#buffer[slot];
	}

	/** Fill every remaining slot with the given value. */
	fill(value) {
		while (!this.isFull()) {
			this.push(value);
		}
	}

	/** Remove every item. */
	clear() {
		while (!this.isEmpty()) {
			this.pop();
		}
	}

	/** @returns {boolean} Whether an item with this id is currently indexed. */
	exists(id) {
		return this.#lookup.has(id);
	}

	/** Fetch an item by id, or undefined when absent. */
	get(id) {
		const slot = this.#lookup.get(id);
		return slot === undefined ? undefined : this.#buffer[slot];
	}

	/**
	 * Tombstone an item by id (mutates the item's status field).
	 * Seriously: avoid this unless absolutely necessary — dead items linger
	 * until a pop/shift/peek sweeps past them.
	 */
	delete(id) {
		const item = this.get(id);
		if (item === undefined) {
			throw new Error("Item not found in the deque.");
		}
		item.status = "deleted";
		this.#tombstones += 1;
		return item;
	}
}
|
||||
|
||||
// Recursive depth-first walk of the server network starting from "home".
// The lambda decides which hosts end up in the returned list; it can also be
// used to piggyback other per-server work onto the same traversal.
/**
 * @param {NS} ns
 * @param {(hostname: string) => boolean} [lambdaCondition] - Filter predicate.
 * @param {string} [hostname="home"] - Current node (recursion state).
 * @param {string[]} [servers] - Accumulator of matching hostnames.
 * @param {string[]} [visited] - Hosts already walked (cycle guard).
 * @returns {string[]} All hostnames passing the predicate, in DFS order.
 */
export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) {
	// Already walked this host — nothing to do.
	if (visited.includes(hostname)) return;
	visited.push(hostname);
	if (lambdaCondition(hostname)) servers.push(hostname);

	// scan() lists neighbors; for non-home hosts the first entry is the
	// parent we arrived from, so drop it before recursing.
	const neighbors = ns.scan(hostname);
	const children = hostname === "home" ? neighbors : neighbors.slice(1);
	children.forEach((child) => getServers(ns, lambdaCondition, child, servers, visited));
	return servers;
}
|
||||
|
||||
// Here are a couple of my own getServers modules.
|
||||
// This one finds the best target for hacking. It tries to balance expected return with time taken.
|
||||
/**
 * Compare a candidate server against the current target and return whichever
 * scores better as a hacking target. Candidates are only considered when
 * their skill requirement is at most half the player's hacking level (the
 * full level once Formulas is available).
 * @param {NS} ns
 * @param {string} server - Candidate hostname.
 * @param {string} [target="n00dles"] - Current best target.
 * @param {boolean} [forms=false] - Whether Formulas.exe math is available.
 * @returns {string} The better of the two hostnames.
 */
export function checkTarget(ns, server, target = "n00dles", forms = false) {
	if (!ns.hasRootAccess(server)) return target;
	const player = ns.getPlayer();
	// These Server objects are mutated below for simulation — assumes
	// getServer() returns detached snapshots; NOTE(review): confirm.
	const serverSim = ns.getServer(server);
	const pSim = ns.getServer(target);
	let previousScore;
	let currentScore;
	if (serverSim.requiredHackingSkill <= player.skills.hacking / (forms ? 1 : 2)) {
		if (forms) {
			// Score both servers at minimum security: max money per weaken-time,
			// scaled by hack success chance.
			serverSim.hackDifficulty = serverSim.minDifficulty;
			pSim.hackDifficulty = pSim.minDifficulty;
			previousScore = pSim.moneyMax / ns.formulas.hacking.weakenTime(pSim, player) * ns.formulas.hacking.hackChance(pSim, player);
			currentScore = serverSim.moneyMax / ns.formulas.hacking.weakenTime(serverSim, player) * ns.formulas.hacking.hackChance(serverSim, player);
		} else {
			// Without Formulas: money/min-difficulty ratio, penalized by how far
			// the server currently is from max money.
			const weight = (serv) => {
				// Calculate the difference between max and available money
				let diff = serv.moneyMax - serv.moneyAvailable;

				// Calculate the scaling factor as the ratio of the difference to the max money
				// The constant here is just an adjustment to fine tune the influence of the scaling factor
				let scalingFactor = diff / serv.moneyMax * 0.95;

				// Adjust the weight based on the difference, applying the scaling penalty
				return (serv.moneyMax / serv.minDifficulty) * (1 - scalingFactor);
			}
			previousScore = weight(pSim)
			currentScore = weight(serverSim)
		}
		if (currentScore > previousScore) target = server;
	}
	return target;
}
|
||||
|
||||
// Push a list of script files out to a server (skipping files already there
// unless overwrite is set). Only copies to servers we have root on.
/**
 * @param {NS} ns
 * @param {string} server - Destination hostname.
 * @param {string[]} scripts - Filenames to copy.
 * @param {boolean} [overwrite=false] - Re-copy even if the file exists.
 */
export function copyScripts(ns, server, scripts, overwrite = false) {
	for (const script of scripts) {
		const needsCopy = !ns.fileExists(script, server) || overwrite;
		if (needsCopy && ns.hasRootAccess(server)) ns.scp(script, server);
	}
}
|
||||
|
||||
// A generic function to check that a given server is prepped (max money,
// min security). Mostly just a convenience.
/**
 * @param {NS} ns
 * @param {string} server - Hostname to check.
 * @returns {boolean} true when money is at max and security is at min.
 */
export function isPrepped(ns, server) {
	const tolerance = 0.0001;
	const maxMoney = ns.getServerMaxMoney(server);
	const money = ns.getServerMoneyAvailable(server);
	const minSec = ns.getServerMinSecurityLevel(server);
	const sec = ns.getServerSecurityLevel(server);
	// Security is compared within a tolerance since float math can leave it
	// a hair above the minimum; money is compared exactly.
	const secFix = Math.abs(sec - minSec) < tolerance;
	// Fixed: return the boolean directly instead of the redundant `? true : false`.
	return money === maxMoney && secFix;
}
|
||||
|
||||
/*
|
||||
This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it.
|
||||
I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway.
|
||||
The prep strategy uses a modified proto-batching technique, which will be covered in part 2.
|
||||
*/
|
||||
/**
 * Bring a target to max money / min security using a modified proto-batching
 * strategy. Loops until isPrepped(): each pass plans one wave of
 * weaken/grow jobs sized to the available RAM, launches them, then blocks
 * until the wave's final weaken reports back.
 * @param {NS} ns
 * @param {Metrics} values - Metrics-like object for the target (reads target, maxMoney, minSec, money, sec, spacer, log).
 * @param {RamNet} ramNet - RAM planner for the whole network.
 * @returns {Promise<boolean>} true once the target is prepped.
 * @throws {Error} If a planned job cannot be exec'd.
 */
export async function prep(ns, values, ramNet) {
	const maxMoney = values.maxMoney;
	const minSec = values.minSec;
	let money = values.money;
	let sec = values.sec;
	while (!isPrepped(ns, values.target)) {
		const wTime = ns.getWeakenTime(values.target);
		const gTime = wTime * 0.8; // Grow takes 80% of weaken time.
		const dataPort = ns.getPortHandle(ns.pid);
		dataPort.clear();

		const pRam = ramNet.cloneBlocks(); // Plan against a scratch copy of the RAM map.
		const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75);
		const totalThreads = ramNet.prepThreads;
		let wThreads1 = 0;
		let wThreads2 = 0;
		let gThreads = 0;
		let batchCount = 1;
		let script, mode;
		/*
		Modes:
		0: Security only
		1: Money only
		2: One shot
		*/

		if (money < maxMoney) {
			gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money));
			wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05);
		}
		if (sec > minSec) {
			// 20 weaken threads per point of excess security (1 / 0.05 per thread).
			wThreads1 = Math.ceil((sec - minSec) * 20);
			if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) {
				// Not enough room for a one-shot: drop the money phase and just weaken,
				// possibly over several full-network waves.
				gThreads = 0;
				wThreads2 = 0;
				batchCount = Math.ceil(wThreads1 / totalThreads);
				if (batchCount > 1) wThreads1 = totalThreads;
				mode = 0;
			} else mode = 2;
		} else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) {
			// Security is fine but the grow phase doesn't fit: split into
			// grow+weaken waves at a ~12.5:1 grow-to-weaken thread ratio.
			mode = 1;
			const oldG = gThreads;
			wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1);
			gThreads = Math.floor(wThreads2 * 12.5);
			batchCount = Math.ceil(oldG / gThreads);
		} else mode = 2;

		// Big buffer here, since all the previous calculations can take a while. One second should be more than enough.
		const wEnd1 = Date.now() + wTime + 1000;
		const gEnd = wEnd1 + values.spacer;
		const wEnd2 = gEnd + values.spacer;

		// "metrics" here is basically a mock Job object. Again, this is just an artifact of repurposed old code.
		const metrics = {
			batch: "prep",
			target: values.target,
			type: "none",
			time: 0,
			end: 0,
			port: ns.pid,
			log: values.log,
			report: false
		};

		// Actually assigning threads. We actually allow grow threads to be spread out in mode 1.
		// This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher.
		// We're not trying to grow a specific amount, we're trying to grow as much as possible.
		for (const block of pRam) {
			while (block.ram >= 1.75) {
				const bMax = Math.floor(block.ram / 1.75)
				let threads = 0;
				if (wThreads1 > 0) {
					script = "S4tWeaken.js";
					metrics.type = "pWeaken1";
					metrics.time = wTime;
					metrics.end = wEnd1;
					threads = Math.min(wThreads1, bMax);
					// The wave's last weaken reports back so we know when to re-check the target.
					if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true;
					wThreads1 -= threads;
				} else if (wThreads2 > 0) {
					script = "S4tWeaken.js";
					metrics.type = "pWeaken2";
					metrics.time = wTime;
					metrics.end = wEnd2;
					threads = Math.min(wThreads2, bMax);
					if (wThreads2 - threads === 0) metrics.report = true;
					wThreads2 -= threads;
				} else if (gThreads > 0 && mode === 1) {
					// Mode 1: grow threads may be split across servers.
					script = "S4tGrow.js";
					metrics.type = "pGrow";
					metrics.time = gTime;
					metrics.end = gEnd;
					threads = Math.min(gThreads, bMax);
					metrics.report = false;
					gThreads -= threads;
				} else if (gThreads > 0 && bMax >= gThreads) {
					// Otherwise grow only runs when it fits whole on one block.
					script = "S4tGrow.js";
					metrics.type = "pGrow";
					metrics.time = gTime;
					metrics.end = gEnd;
					threads = gThreads;
					metrics.report = false;
					gThreads = 0;
				} else break;
				metrics.server = block.server;
				const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics));
				if (!pid) throw new Error("Unable to assign all jobs.");
				block.ram -= 1.75 * threads;
			}
		}

		// Fancy UI stuff to update you on progress.
		const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now();
		const timer = setInterval(() => {
			ns.clearLog();
			switch (mode) {
				case 0:
					ns.print(`Weakening security on ${values.target}...`);
					break;
				case 1:
					ns.print(`Maximizing money on ${values.target}...`);
					break;
				case 2:
					ns.print(`Finalizing preparation on ${values.target}...`);
			}
			ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`);
			ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`);
			const time = tEnd - Date.now();
			ns.print(`Estimated time remaining: ${ns.tFormat(time)}`);
			ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`);
		}, 200);
		// Safety net: make sure the interval dies with the script.
		ns.atExit(() => clearInterval(timer));

		// Wait for the last weaken to finish.
		do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken"));
		clearInterval(timer);
		await ns.sleep(100);

		// Refresh state for the next planning pass.
		money = ns.getServerMoneyAvailable(values.target);
		sec = ns.getServerSecurityLevel(values.target);
	}
	return true;
}
|
||||
|
||||
// Debugging tool, not referenced anywhere else in the code: used to test the runtimes of functions.
/**
 * Measures the average wall-clock runtime of `lambda` over 1000 calls.
 * @param {(i: number) => void} lambda - Function under test; receives the iteration index.
 * @returns {number} Mean elapsed time per call, in milliseconds.
 */
export function benchmark(lambda) {
	const iterations = 1000;
	let total = 0;
	// Bug fix: the loop previously ran 1001 times (`i <= 1000`) while still
	// dividing the accumulated time by 1000, skewing the reported average.
	for (let i = 0; i < iterations; ++i) {
		const start = performance.now();
		lambda(i);
		total += performance.now() - start;
	}
	return total / iterations;
}
|
||||
1
local/path/home/ServerRouteList.txt
Normal file
1
local/path/home/ServerRouteList.txt
Normal file
@@ -0,0 +1 @@
|
||||
{"home":{"n00dles":{},"foodnstuff":{},"sigma-cosmetics":{"zer0":{"omega-net":{"the-hub":{},"netlink":{"rothman-uni":{"rho-construction":{"aerocorp":{"unitalife":{"icarus":{"infocomm":{"titan-labs":{"fulcrumtech":{"omnitek":{},"4sigma":{"powerhouse-fitness":{}}}}}},"solaris":{"nova-med":{"run4theh111z":{}}}}}}},"catalyst":{}}}},"max-hardware":{}},"joesguns":{},"hong-fang-tea":{},"harakiri-sushi":{"nectar-net":{"neo-net":{"avmnite-02h":{}},"phantasy":{}},"CSEC":{"silver-helix":{"computek":{"zb-institute":{"lexo-corp":{"global-pharm":{"omnia":{"defcomm":{"zb-def":{"microdyne":{"vitalife":{"kuai-gong":{},".":{"b-and-a":{},"blade":{"fulcrumassets":{}},"nwo":{"The-Cave":{}},"clarkinc":{"ecorp":{},"megacorp":{}}}}}}}},"deltaone":{"univ-energy":{"taiyang-digital":{"applied-energetics":{"stormtech":{},"helios":{}}}},"zeus-med":{}}}},"alpha-ent":{"galactic-cyber":{}}}},"johnson-ortho":{"summit-uni":{"millenium-fitness":{}},"I.I.I.I":{"aevum-police":{"snap-fitness":{}}}},"crush-fitness":{"syscore":{}}}}},"iron-gym":{},"darkweb":{},"pserv-0":{},"pserv-1":{},"pserv-2":{},"pserv-3":{},"pserv-4":{},"pserv-5":{},"pserv-6":{},"pserv-7":{},"pserv-8":{},"pserv-9":{},"pserv-10":{},"pserv-11":{},"pserv-12":{},"pserv-13":{},"pserv-14":{},"pserv-15":{},"pserv-16":{},"pserv-17":{},"pserv-18":{},"pserv-19":{},"pserv-20":{},"pserv-21":{},"pserv-22":{},"pserv-23":{},"pserv-24":{}}}
|
||||
42
local/path/home/Serverlist.js
Normal file
42
local/path/home/Serverlist.js
Normal file
@@ -0,0 +1,42 @@
|
||||
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, purchaseAndUpgradeServers } from "/RamsesUtils.js";
|
||||
|
||||
/**
 * Orchestration entry point: scans the network, picks the best hack target,
 * roots everything it can, launches the batch controller against the target,
 * and finally buys/upgrades purchased servers.
 * args[0] === true re-runs target selection and redeploys workers everywhere.
 * @param {NS} ns
 */
export async function main(ns) {
	// Worker scripts deployed to rooted machines (index 0 is the controller).
	const workerScripts = ["batch.js", "Ramses-grow.js", "Ramses-weaken.js", "Ramses-hack.js"];
	// TODO: purchase port-openers from the darkweb, then re-run getCracks() and re-root.
	const cracks = getCracks(ns);
	const maxPorts = Object.keys(cracks).length;
	scanServerList(ns);
	// Early game: force the easiest target until we have some hacking skill.
	const manualTargetOverride = ns.getHackingLevel() < 200 ? "n00dles" : "";

	findBestTarget(ns, 999, maxPorts, ns.getHackingLevel(), manualTargetOverride);
	const bestTarget = ns.read("bestTarget.txt");

	ns.tprint("Best Target: " + bestTarget);
	ns.tprint(Object.keys(JSON.parse(ns.read("serverList.txt"))).length);
	crackingAndRooting(ns, cracks, workerScripts, true);
	ns.exec(workerScripts[0], "home", 1, JSON.parse(bestTarget).serverName, 500, true);

	const reset = ns.args[0];
	ns.print(reset);
	if (reset === true) {
		ns.tprint("reset === true");
		findBestTarget(ns, 999, maxPorts, ns.getHackingLevel(), manualTargetOverride);
		const serverList = JSON.parse(ns.read("serverList.txt"));
		for (const hostname of Object.keys(serverList)) {
			copyAndRunScript(ns, workerScripts, hostname);
		}
	}
	await purchaseAndUpgradeServers(ns);
}
|
||||
|
||||
5
local/path/home/StartControllers.js
Normal file
5
local/path/home/StartControllers.js
Normal file
@@ -0,0 +1,5 @@
|
||||
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, runControllerOnPserv } from "/RamsesUtils.js";
|
||||
/**
 * Entry point: delegates to the shared routine that launches the batch
 * controller on each purchased server.
 * @param {NS} ns
 */
export async function main(ns) {
	await runControllerOnPserv(ns);
}
|
||||
14
local/path/home/Startup.js
Normal file
14
local/path/home/Startup.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/**
 * Startup diagnostic: dumps the current player object to the terminal.
 * @param {NS} ns
 */
export async function main(ns) {
	const player = ns.getPlayer();
	ns.tprint(player);
}
|
||||
21
local/path/home/analyzeContract.js
Normal file
21
local/path/home/analyzeContract.js
Normal file
@@ -0,0 +1,21 @@
|
||||
/**
 * Prints the type, data, description, and remaining tries of a coding
 * contract so it can be inspected before attempting a solution.
 * @param {NS} ns - args[0]: host that holds the contract, args[1]: contract filename.
 */
export async function main(ns) {
	const sTarget = ns.args[0]; // target server which has the contract
	const sContract = ns.args[1]; // target contract file

	//ns.tprint(ns.codingcontract.getContractTypes());
	//ns.codingcontract.createDummyContract();

	const sContractType = ns.codingcontract.getContractType(sContract, sTarget);
	const sContractData = ns.codingcontract.getData(sContract, sTarget);
	const sContractDescription = ns.codingcontract.getDescription(sContract, sTarget);
	const sContractTries = ns.codingcontract.getNumTriesRemaining(sContract, sTarget);

	ns.tprint("sContractType = " + sContractType);
	ns.tprint("sContractData = " + sContractData);
	ns.tprint("sContractDescription = " + sContractDescription);
	ns.tprint("sContractTries = " + sContractTries);
	// Bug fix: removed a stray `JSON.stringify(sContractType, sContractType, true)`
	// call — its result was discarded and its replacer/space arguments were invalid.
}
|
||||
147
local/path/home/batch.js
Normal file
147
local/path/home/batch.js
Normal file
@@ -0,0 +1,147 @@
|
||||
/**
 * Simple recurring HGW scheduler for a single target. Estimates the RAM a
 * full cycle needs, then (if it fits, or args[2] forces it) loops forever:
 * always weaken, grow once security is near minimum, hack once money is high.
 * @param {NS} ns - args[0]: target host, args[1]: wave interval in ms
 *                  (default 20000), args[2]: true to ignore the RAM estimate.
 * @returns {Promise<boolean|undefined>} false on bad arguments; otherwise loops forever.
 */
export async function main(ns) {
	//Arguments
	const sTarget = ns.args[0]; // target server
	let nFrequency = ns.args[1]; // how often (ms) to launch a hack/grow/weaken wave
	const bIgnoreRAM = ns.args[2]; //if true the script will run even if estimated RAM is too low

	//Settings: worker scripts and the thread counts used both for the RAM
	//estimate below and for the ns.run calls in the loop.
	const sWeakenScript = "Ramses-weaken.js";
	const sGrowScript = "Ramses-grow.js";
	const sHackScript = "Ramses-hack.js";
	const nWeakenThreads = 5;
	const nGrowThreads = 10;
	const nHackThreads = 1;

	//logs
	ns.disableLog("getServerMaxRam");
	ns.disableLog("getServerUsedRam");
	ns.disableLog("getServerMinSecurityLevel");
	ns.disableLog("getServerMaxMoney");
	ns.disableLog("getServerSecurityLevel");
	ns.disableLog("getServerMoneyAvailable");

	//abort script if sTarget is undefined
	if (sTarget === undefined) {
		ns.tprint("1st arg sTarget is undefined");
		return false;
	}

	//how often do we run script in milliseconds
	if (nFrequency === undefined) {
		nFrequency = 20000; //run every 20 seconds unless defined as the 2nd argument when calling the script
	}

	//target server info
	const nMinSecurity = ns.getServerMinSecurityLevel(sTarget);
	const nMaxMoney = ns.getServerMaxMoney(sTarget);

	//abort script if sTarget cant have money
	if (nMaxMoney <= 0) {
		ns.tprint("sTarget (" + sTarget + ") has no nMaxMoney");
		return false;
	}

	//main variables
	const oRunner = ns.getServer(); //which server object is running this script
	const sRunner = oRunner.hostname; //hostname string of the server running the script
	const nMaxRAM = ns.getServerMaxRam(sRunner);
	const nUsedRAM = ns.getServerUsedRam(sRunner);
	let nFreeRam = nMaxRAM - nUsedRAM;
	const sScriptName = ns.getScriptName();
	const nScriptSize = ns.getScriptRam(sScriptName, sRunner);
	// Removed unused `nCores` (oRunner.cpuCores was never read).

	// RAM per wave for each worker script at its configured thread count.
	const nWeakenScriptRAM = ns.getScriptRam(sWeakenScript, sRunner) * nWeakenThreads;
	const nGrowScriptRAM = ns.getScriptRam(sGrowScript, sRunner) * nGrowThreads;
	const nHackScriptRAM = ns.getScriptRam(sHackScript, sRunner) * nHackThreads;

	const nWeakenTime = ns.getWeakenTime(sTarget);
	const nGrowTime = ns.getGrowTime(sTarget);
	const nHackTime = ns.getHackTime(sTarget);

	ns.tprint(sScriptName + " nScriptSize = " + nScriptSize+"GB");

	// Peak RAM: waves launched every nFrequency ms overlap while a worker runs,
	// so scale each wave's RAM by (worker runtime / launch interval).
	const nMaxWeakenRAM = Math.ceil(nWeakenScriptRAM * ((nWeakenTime / 1000) / (nFrequency / 1000)));
	ns.tprint("nWeakenTime = " + nWeakenTime / 1000);
	ns.tprint("nFrequency = " + nFrequency / 1000);
	ns.tprint("nMaxWeakenRAM = " + nMaxWeakenRAM);

	const nMaxGrowRAM = Math.ceil(nGrowScriptRAM * ((nGrowTime / 1000) / (nFrequency / 1000)));
	ns.tprint("nGrowTime = " + nGrowTime / 1000);
	ns.tprint("nFrequency = " + nFrequency / 1000);
	ns.tprint("nMaxGrowRAM = " + nMaxGrowRAM);

	const nMaxHackRAM = Math.ceil(nHackScriptRAM * ((nHackTime / 1000) / (nFrequency / 1000)));
	ns.tprint("nHackTime = " + nHackTime / 1000);
	ns.tprint("nFrequency = " + nFrequency / 1000);
	ns.tprint("nMaxHackRAM = " + nMaxHackRAM);

	// 7% safety margin on the combined estimate.
	const nTotalRAM = (nScriptSize + nMaxWeakenRAM + nMaxGrowRAM + nMaxHackRAM) * 1.07;
	ns.tprint("Total estimated required RAM = " + nTotalRAM+"GB");
	ns.tprint("Available RAM: " + nFreeRam+" / "+nMaxRAM+"GB");

	if (nTotalRAM < nFreeRam || bIgnoreRAM === true) {
		while (true) {

			//server stats
			let nCurrentSecurity = ns.getServerSecurityLevel(sTarget);
			let nCurrentMoney = ns.getServerMoneyAvailable(sTarget);

			//timestamp
			let currentDate = new Date();
			let nOffset;

			ns.print("Cash: " + (Math.floor(nCurrentMoney * 1000) / 1000) + " / " + nMaxMoney);
			ns.print("Security: " + (Math.floor(nCurrentSecurity * 1000) / 1000) + " / " + nMinSecurity);

			//Calculate estimated time of completion
			// NOTE(review): 'sw-SV' looks like a typo for 'sv-SE' (Swedish);
			// left as-is to preserve the existing log output format.
			nOffset = ns.getWeakenTime(sTarget);
			let nWeakTime = new Date(currentDate.getTime() + nOffset);
			let sWeakTime = nWeakTime.toLocaleTimeString('sw-SV'); //swedish time

			//Print estimated time of completion
			ns.print("Weakening " + sTarget + " Estimated complete at " + sWeakTime);

			if (nCurrentSecurity <= (nMinSecurity + 5)) {

				//Calculate estimated time of completion
				nOffset = ns.getGrowTime(sTarget);
				let nGrowTime = new Date(currentDate.getTime() + nOffset);
				let sGrowTime = nGrowTime.toLocaleTimeString('sw-SV'); //swedish time

				//Print estimated time of completion
				ns.print("Growing " + sTarget + " Estimated complete at " + sGrowTime);

				if (nCurrentMoney >= nMaxMoney * 0.8) {

					//Calculate estimated time of completion
					nOffset = ns.getHackTime(sTarget);
					let nHackTime = new Date(currentDate.getTime() + nOffset);
					let sHackTime = nHackTime.toLocaleTimeString('sw-SV'); //swedish time

					//Print estimated time of completion
					ns.print("Hacking " + sTarget + " Estimated complete at " + sHackTime);

					// Consistency fix: use the declared thread constants instead of
					// repeating the literals (the RAM estimate above already uses them).
					ns.run(sHackScript, nHackThreads, sTarget);
				}
				ns.run(sGrowScript, nGrowThreads, sTarget);
			}
			ns.run(sWeakenScript, nWeakenThreads, sTarget);
			nFreeRam = ns.getServerMaxRam(sRunner) - ns.getServerUsedRam(sRunner);

			await ns.sleep(nFrequency);
			ns.print("-------------------------------------------------------------------------");
		}
	}
	else {
		ns.tprint("Insufficient estimated required RAM... no scripts were started...");
	}

}
|
||||
1
local/path/home/bestTarget.txt
Normal file
1
local/path/home/bestTarget.txt
Normal file
@@ -0,0 +1 @@
|
||||
{"serverName":"omega-net","maxRam":32,"maxMoney":69174578,"minSec":10,"minPorts":2,"minHackLvl":202,"rootAccess":true,"factorMoneyPerTime":0.0319075530954955,"openPorts":0,"serverFiles":["S4tGrow.js","S4tHack.js","S4tWeaken.js","the-new-god.lit"]}
|
||||
384
local/path/home/controller.js
Normal file
384
local/path/home/controller.js
Normal file
@@ -0,0 +1,384 @@
|
||||
/*
|
||||
Welcome to part 3. I'll only be commenting on things that have changed from the previous parts, so if there's something
|
||||
confusing, be sure to go back and look at parts 1 and 2 for more detailed explanations.
|
||||
|
||||
This time we're going to make a shotgun batcher. In some ways this is really just a protobatcher that makes a
|
||||
much larger batch. We're going to fill up ram with as many batches as we can manage, wait for them to finish, then
|
||||
fire off another blast.
|
||||
|
||||
Note that this is mainly written with the fact that I intend to adapt this into a continuous batcher later in mind.
|
||||
There are far more optimal ways to run a shotgun-style batcher, but rather than make the best shotgun I could,
|
||||
I aimed to make this an ideal stepping stone on the quest for a continuous batcher.
|
||||
*/
|
||||
|
||||
import { getServers, copyScripts, checkTarget, isPrepped, prep } from "/utils.js";
|
||||
|
||||
// Job types in schedule order for a single HWGW batch.
const TYPES = ["hack", "weaken1", "grow", "weaken2"];
// Worker scripts copied to every rooted server.
const WORKERS = ["tHack.js", "tWeaken.js", "tGrow.js"];
// Maps a job type to the worker script that performs it (both weaken jobs share one script).
const SCRIPTS = { hack: "tHack.js", weaken1: "tWeaken.js", grow: "tGrow.js", weaken2: "tWeaken.js" };
// RAM cost per thread (GB) for each job type, used when reserving blocks in the RamNet.
const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 };
// We won't be using the offsets anymore, but I've left them here in case we bring them back for a later part.
// const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 };
|
||||
|
||||
/**
 * Shotgun batch controller: each cycle scans the network, (re)preps the
 * target, fills all available RAM with HWGW batches, deploys them, then
 * waits for the whole blast to resolve before starting the next cycle.
 * @param {NS} ns
 */
export async function main(ns) {
	ns.disableLog("ALL");
	ns.tail();

	while (true) {
		// Setup is mostly the same. Workers report completion on this script's pid port.
		const dataPort = ns.getPortHandle(ns.pid);
		dataPort.clear();
		let target = "n00dles";
		const servers = getServers(ns, (server) => {
			target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home"));
			copyScripts(ns, server, WORKERS, true);
			return ns.hasRootAccess(server);
		});
		/* manual override */
		// NOTE(review): this hard-coded override discards whatever checkTarget
		// selected in the scan above — confirm it is still intended.
		target = "max-hardware";
		const ramNet = new RamNet(ns, servers);
		const metrics = new Metrics(ns, target);
		if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet);
		ns.clearLog();
		ns.print("Optimizing. This may take a few seconds...")
		/*
		New optimizer is async because it can take upwards of 5 seconds to run. We can afford the heavy
		computations because shotgun batchers are very front-loaded. In a "real" shotgun batcher, you'll want
		to modify the ramnet so that you can do this during the downtime between mega-batches.
		*/
		await optimizeShotgun(ns, metrics, ramNet); // See the function below for details.
		metrics.calculate(ns);

		// I've renamed the schedule array from "batch" to "jobs" just for clarity purposes.
		// The batchCount declaration has also been moved down here because we use it for scheduling.
		const jobs = [];
		let batchCount = 0;

		// Another change. Instead of tracking the end times by type, I'm now using a unified end time.
		// This makes the scheduling a bit simpler as long as we're always going in chronological order.
		metrics.end = Date.now() + metrics.wTime - metrics.spacer;

		// Instead of one batch, we repeat the scheduling based on the depth calculated by the optimizer.
		while (batchCount++ < metrics.depth) {
			for (const type of TYPES) {
				// As you can see, calculating the end time for each new job is much simpler this way.
				// The rest of the scheduling is mostly unchanged.
				metrics.end += metrics.spacer;

				// Batchcount is part of the constructor now. Yes I was that lazy in the last part.
				const job = new Job(type, metrics, batchCount);
				if (!ramNet.assign(job)) {
					ns.print(`ERROR: Unable to assign ${type}. Dumping debug info:`);
					ns.print(job);
					ns.print(metrics);
					ramNet.printBlocks(ns);
					return;
				}
				jobs.push(job);
			}
		}

		/*
		Deployment is completely unchanged. However, with the much larger batch sizes, you may find that
		this can potentially freeze the game for minutes at a time. If it's too disruptive or triggers the
		infinite loop failsafe, you can uncomment the sleep line.

		There's really no need to do this synchronously for our batcher, but in a "real" shotgun batcher, you wouldn't
		use any spacers at all, and try to keep deployment time and execution time down to as little as possible in order
		to minimize downtime.
		*/
		for (const job of jobs) {
			// Each worker writes its observed start delay to its own pid port;
			// accumulate it so later jobs shift their end times to compensate.
			job.end += metrics.delay;
			const jobPid = ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job));
			if (!jobPid) throw new Error(`Unable to deploy ${job.type}`);
			const tPort = ns.getPortHandle(jobPid);
			await tPort.nextWrite();
			metrics.delay += tPort.read();
		}

		/*
		This is a silly hack. Due to the way arrays work in JS, pop() is much faster than shift() and we're
		going to be accessing these jobs in FIFO order in a moment (ie. a queue). Since we've got lots of downtime
		and the jobs array can get really huge, I just reverse them now to save time later.

		We'll be implementing a more sophisticated schedule in the next part.
		*/
		jobs.reverse();

		// I've stepped up the logging/feedback a bit here, but it's otherwise pretty much the same.
		const timer = setInterval(() => {
			ns.clearLog();
			ns.print(`Hacking ~\$${ns.formatNumber(metrics.maxMoney * metrics.greed * batchCount * metrics.chance)} from ${metrics.target}`);
			ns.print(`Greed: ${Math.floor(metrics.greed * 1000) / 10}%`);
			ns.print(`Ram available: ${ns.formatRam(ramNet.totalRam)}/${ns.formatRam(ramNet.maxRam)}`);
			ns.print(`Total delay: ${metrics.delay}ms`);
			ns.print(`Active jobs remaining: ${jobs.length}`);
			ns.print(`ETA ${ns.tFormat(metrics.end - Date.now())}`);
		}, 1000);
		ns.atExit(() => {
			clearInterval(timer);
		});

		/*
		As each job finishes, we update the ramnet to reflect it. Once the queue is empty, we start over.
		Updating the ramnet like this isn't really necessary since we're just going to rebuild it entirely in
		the next iteration, but I wanted to demonstrate what it will look like in preparation for the next part.
		*/
		do {
			await dataPort.nextWrite();
			dataPort.clear();

			// It's technically possible that some of these might finish out of order due to lag or something.
			// But it doesn't actually matter since we're not doing anything with this data yet.
			ramNet.finish(jobs.pop());
		} while (jobs.length > 0);
		clearInterval(timer);
	}
}
|
||||
|
||||
// The Job class, lean as it is, remains mostly unchanged. I got rid of the server argument since I wasn't using it
|
||||
// and added a batch number instead.
|
||||
/**
 * A single scheduled worker task within one HWGW batch. Instances are
 * serialized with JSON.stringify and handed to the worker scripts, so the
 * field set (and insertion order) is part of the contract.
 */
class Job {
	constructor(type, metrics, batch) {
		const threads = metrics.threads[type];
		Object.assign(this, {
			type,
			// Unified end time taken from the shared metrics (metrics.ends[type] is no longer used).
			end: metrics.end,
			time: metrics.times[type],
			target: metrics.target,
			threads,
			// Total RAM this job reserves: per-thread cost times thread count.
			cost: threads * COSTS[type],
			server: "none", // Filled in by RamNet.assign().
			report: true, // All workers report when they finish.
			port: metrics.port,
			batch,
		});
		// Future stuff, currently unused:
		// this.status = "active";
		// this.id = type + batch;
	}
}
|
||||
|
||||
// Almost entirely the same, aside from the changes to end time.
|
||||
/**
 * Per-target batch metrics: a snapshot of the target's money/security plus
 * the timing and thread counts for each HWGW job type. `calculate` refreshes
 * everything for the current hacking level and greed setting.
 */
class Metrics {
	/**
	 * @param {NS} ns
	 * @param {string} server - Hostname of the batch target.
	 */
	constructor(ns, server) {
		this.target = server;
		this.maxMoney = ns.getServerMaxMoney(server);
		// Floored at 1 so later money ratios never divide by zero.
		this.money = Math.max(ns.getServerMoneyAvailable(server), 1);
		this.minSec = ns.getServerMinSecurityLevel(server);
		this.sec = ns.getServerSecurityLevel(server);
		this.prepped = isPrepped(ns, server);
		this.chance = 0;
		this.wTime = 0;
		this.delay = 0; // Accumulated worker start delay (ms), updated during deployment.
		this.spacer = 5; // Gap (ms) between consecutive job end times.
		this.greed = 0.1; // Fraction of max money to hack per batch; tuned by the optimizer.
		this.depth = 0; // The number of concurrent batches to run. Set by the optimizer.

		this.times = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.end = 0; // Unified end time for the new scheduling; the old per-type way is kept below in case I switch back later.
		// this.ends = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.threads = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };

		// Workers report back on this script's pid port.
		this.port = ns.pid;
	}

	// Refreshes stats, job durations, and thread counts for the given greed level.
	// Almost totally unchanged, except that I've commented out the default depth calculation, since it's done elsewhere.
	calculate(ns, greed = this.greed) {
		const server = this.target;
		const maxMoney = this.maxMoney;
		this.money = ns.getServerMoneyAvailable(server);
		this.sec = ns.getServerSecurityLevel(server);
		this.wTime = ns.getWeakenTime(server);
		this.times.weaken1 = this.wTime;
		this.times.weaken2 = this.wTime;
		// Hack and grow durations derived from weaken time (1/4 and 4/5 of it).
		this.times.hack = this.wTime / 4;
		this.times.grow = this.wTime * 0.8;
		// this.depth = this.wTime / this.spacer * 4;

		const hPercent = ns.hackAnalyze(server);
		const amount = maxMoney * greed;
		const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(server, amount)), 1);
		// tGreed is the fraction actually stolen by the integer thread count.
		const tGreed = hPercent * hThreads;

		// Okay I lied. We now overestimate grow threads by 1%. This helps prevent level ups from causing desyncs.
		// Only a little, though. If you gain too many levels per shotgun blast, it will still have to re-prep the server.
		const gThreads = Math.ceil(ns.growthAnalyze(server, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
		// NOTE(review): the 0.002/0.05 and 0.004/0.05 ratios look like security
		// gained per hack/grow thread versus security removed per weaken thread —
		// confirm against the game's current constants.
		this.threads.weaken1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
		this.threads.weaken2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
		this.threads.hack = hThreads;
		this.threads.grow = gThreads;
		this.chance = ns.hackAnalyzeChance(server);
	}
}
|
||||
|
||||
// Once again, not a whole lot of changes. I've added a new function in support of the optimizer. Details below.
|
||||
/**
 * Pool of free-RAM blocks across all rooted servers, with helpers to reserve,
 * release, and simulate job assignments without touching the real pool.
 */
class RamNet {
	#blocks = [];
	#minBlockSize = Infinity;
	#maxBlockSize = 0;
	#totalRam = 0;
	#maxRam = 0;
	#prepThreads = 0;
	#index = new Map();

	/**
	 * @param {NS} ns
	 * @param {string[]} servers - Candidate hostnames; only rooted servers with
	 *                             usable free RAM are admitted to the pool.
	 */
	constructor(ns, servers) {
		for (const hostname of servers) {
			if (!ns.hasRootAccess(hostname)) continue;
			const capacity = ns.getServerMaxRam(hostname);
			const free = capacity - ns.getServerUsedRam(hostname);
			// Skip servers too small to hold even a single worker thread.
			if (free < 1.60) continue;
			this.#blocks.push({ server: hostname, ram: free });
			if (free < this.#minBlockSize) this.#minBlockSize = free;
			if (free > this.#maxBlockSize) this.#maxBlockSize = free;
			this.#totalRam += free;
			this.#maxRam += capacity;
			this.#prepThreads += Math.floor(free / 1.75);
		}
		this.#sort();
		this.#blocks.forEach((block, position) => this.#index.set(block.server, position));
	}

	// Smallest blocks first; home always sorts last so its RAM is used as a last resort.
	#sort() {
		this.#blocks.sort((a, b) => {
			if (a.server === "home") return 1;
			if (b.server === "home") return -1;
			return a.ram - b.ram;
		});
	}

	get totalRam() {
		return this.#totalRam;
	}

	get maxRam() {
		return this.#maxRam;
	}

	get maxBlockSize() {
		return this.#maxBlockSize;
	}

	get prepThreads() {
		return this.#prepThreads;
	}

	// Looks up a block by hostname; throws if the server isn't in the pool.
	getBlock(server) {
		if (!this.#index.has(server)) {
			throw new Error(`Server ${server} not found in RamNet.`);
		}
		return this.#blocks[this.#index.get(server)];
	}

	// Reserves RAM for a job on the first (smallest) block that fits, recording
	// the chosen host on the job. Returns false when nothing fits.
	assign(job) {
		const candidate = this.#blocks.find((block) => block.ram >= job.cost);
		if (!candidate) return false;
		job.server = candidate.server;
		candidate.ram -= job.cost;
		this.#totalRam -= job.cost;
		return true;
	}

	// Releases the RAM a finished job had reserved.
	finish(job) {
		this.getBlock(job.server).ram += job.cost;
		this.#totalRam += job.cost;
	}

	// Shallow copies of the blocks, safe to mutate in simulations.
	cloneBlocks() {
		return this.#blocks.map((block) => ({ ...block }));
	}

	printBlocks(ns) {
		for (const block of this.#blocks) ns.print(block);
	}

	// Simulates assigning the given per-job costs over and over against a copy
	// of the pool, returning how many complete batches fit in current free RAM.
	testThreads(threadCosts) {
		const simulated = this.cloneBlocks();
		let batches = 0;
		let fits = true;
		while (fits) {
			for (const cost of threadCosts) {
				fits = false;
				const slot = simulated.find((block) => block.ram >= cost);
				if (!slot) break;
				slot.ram -= cost;
				fits = true;
			}
			if (fits) batches++;
		}
		return batches;
	}
}
|
||||
|
||||
// This one's got some pretty big changes, even if it doesn't look like it. For one, it's now async, and you'll see why.
|
||||
/**
 * Searches greed levels from high to low for the configuration that maximizes
 * income per second across a full shotgun blast, writing the winning greed and
 * batch depth back into `metrics`. Async because the search can take seconds.
 * @param {NS} ns
 * @param {Metrics} metrics - Updated in place (greed, depth) with the best result.
 * @param {RamNet} ramNet - Used read-only, via testThreads simulations.
 * @throws {Error} When no greed level fits even a single batch into RAM.
 */
async function optimizeShotgun(ns, metrics, ramNet) {
	// Setup is mostly the same.
	const maxThreads = ramNet.maxBlockSize / 1.75;
	const maxMoney = metrics.maxMoney;
	const hPercent = ns.hackAnalyze(metrics.target);
	const wTime = ns.getWeakenTime(metrics.target); // We'll need this for one of our calculations.

	const minGreed = 0.001;
	const stepValue = 0.01; // Step value is now 10x higher. If you think that's overkill, it's not.
	let greed = 0.99;
	let best = 0; // Initializing the best value found.

	// This algorithm starts out pretty much the same. We begin by weeding out the obviously way too huge greed levels.
	while (greed > minGreed) {
		const amount = maxMoney * greed;
		const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(metrics.target, amount)), 1);
		const tGreed = hPercent * hThreads;
		// 1% overestimation here too. Always make sure your calculations match.
		const gThreads = Math.ceil(ns.growthAnalyze(metrics.target, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);

		// Only consider greed levels whose largest job fits in the biggest block.
		if (Math.max(hThreads, gThreads) <= maxThreads) {
			const wThreads1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
			const wThreads2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);

			const threadCosts = [hThreads * 1.7, wThreads1 * 1.75, gThreads * 1.75, wThreads2 * 1.75];

			// These lines were supposed to help weed out a few more too-high values, but in my unit tests they never
			// actually did anything. Uncomment them if you want.
			// const totalCost = threadCosts.reduce((t, c) => t + c);
			// if (totalCost > ramNet.totalRam) continue;

			/*
			Here's where it all changes. First we calculate the number of batches we can fit into ram at the current
			greed level. Then we calculate how much money that nets and how long it will take. If that income/time is
			better than what we've found before, we update the metrics and then continue.

			Unlike the previous version, this one checks every value. Between that and the loop to simulate assigning
			jobs, this is a very heavy algorithm that can take seconds to execute if done synchronously. To prevent it
			from freezing the game, we run it asynchronously and sleep after checking each value.
			*/
			const batchCount = ramNet.testThreads(threadCosts);
			// Income rate: total stolen money over the blast's wall-clock duration.
			const income = tGreed * maxMoney * batchCount / (metrics.spacer * 4 * batchCount + wTime);
			if (income > best) {
				best = income;
				metrics.greed = tGreed;
				metrics.depth = batchCount;
			}
		}
		await ns.sleep(0);
		greed -= stepValue;
	}
	// Added the check here to only throw an error if we failed to find any valid configurations.
	if (best === 0) throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong.");
}
|
||||
7
local/path/home/corp/Autosell.js
Normal file
7
local/path/home/corp/Autosell.js
Normal file
@@ -0,0 +1,7 @@
|
||||
/**
 * Corp debugging script: inspects the Plants material in the first city.
 * @param {NS} ns
 */
export async function main(ns) {
	// Cities where the corporation operates.
	let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
	// Agriculture division name.
	let corpName = "AgraNeo";
	// Dump the Plants material record for the first city for inspection.
	ns.tprint(ns.corporation.getMaterial(corpName,cities[0],"Plants"))
	// FIXME(review): sellMaterial is called with no arguments; the API expects
	// (divisionName, city, materialName, amt, price), so this call will fail at
	// runtime. Supply the intended sell order (e.g. "MAX"/"MP") — confirm intent.
	ns.corporation.sellMaterial()
}
|
||||
16
local/path/home/corp/HireWorkers.js
Normal file
16
local/path/home/corp/HireWorkers.js
Normal file
@@ -0,0 +1,16 @@
|
||||
/**
 * Staffs corp offices: for every city office that still has open seats,
 * hires one employee into each core role that is currently unstaffed.
 * Rerun the script to continue filling larger offices.
 * @param {NS} ns
 */
export async function main(ns) {
	const cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
	const corpName = "ChemNeo";
	const roles = ["Operations", "Engineer", "Business", "Management"];
	for (const city of cities) {
		const office = ns.corporation.getOffice(corpName, city);
		// Skip offices that are already full.
		if (office.numEmployees >= office.size) continue;
		// Replaced the ternary-for-side-effect expressions with plain conditionals;
		// removed the unused `currentSize` variable.
		for (const role of roles) {
			if (office.employeeJobs[role] < 1) {
				ns.corporation.hireEmployee(corpName, city, role);
			}
		}
	}
}
|
||||
13
local/path/home/corp/SetupExport.js
Normal file
13
local/path/home/corp/SetupExport.js
Normal file
@@ -0,0 +1,13 @@
|
||||
/**
 * Rewires the Plants/Chemicals export routes between the two divisions in
 * every city: existing routes in both directions are cancelled first, then
 * re-created with a demand-driven export string.
 * @param {NS} ns
 */
export async function main(ns) {
	const cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
	const [agriDivision, agriMaterial] = ["AgraNeo", "Plants"];
	const [chemDivision, chemMaterial] = ["ChemNeo", "Chemicals"];
	// Export exactly what the importer consumes each cycle.
	const exportString = "IPROD*-1";
	for (const city of cities) {
		ns.corporation.cancelExportMaterial(agriDivision, city, chemDivision, city, agriMaterial);
		ns.corporation.cancelExportMaterial(chemDivision, city, agriDivision, city, chemMaterial);
		ns.corporation.exportMaterial(agriDivision, city, chemDivision, city, agriMaterial, exportString);
		ns.corporation.exportMaterial(chemDivision, city, agriDivision, city, chemMaterial, exportString);
	}
}
|
||||
6
local/path/home/corp/Smart.js
Normal file
6
local/path/home/corp/Smart.js
Normal file
@@ -0,0 +1,6 @@
|
||||
/**
 * Stub for enabling smart supply on a division office.
 * @param {NS} ns - args: [corpName, city].
 * @returns {Promise<boolean>} Always true; the real call is still disabled.
 */
export async function main(ns) {
	const [corpName, city] = ns.args;
	// NOTE(review): the actual call is intentionally commented out.
	//ns.corporation.setSmartSupply(corpName, city, true);
	return true;
}
|
||||
12
local/path/home/corp/UpgradeOffice.js
Normal file
12
local/path/home/corp/UpgradeOffice.js
Normal file
@@ -0,0 +1,12 @@
|
||||
/**
 * Grows every ChemNeo office that holds fewer than 4 employees up to a
 * capacity of exactly 4.
 * @param {NS} ns
 */
export async function main(ns) {
  const MIN_OFFICE_SIZE = 4;
  const cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
  const corpName = "ChemNeo";

  for (const city of cities) {
    const { size } = ns.corporation.getOffice(corpName, city);
    if (size < MIN_OFFICE_SIZE) {
      // Buy just enough seats to reach the minimum.
      ns.corporation.upgradeOfficeSize(corpName, city, MIN_OFFICE_SIZE - size);
    }
  }
}
|
||||
8
local/path/home/killAllScript.js
Normal file
8
local/path/home/killAllScript.js
Normal file
@@ -0,0 +1,8 @@
|
||||
/**
 * Kills every running script on every server listed in serverList.txt.
 * The `true` flag tells killall to spare the calling script.
 * @param {NS} ns
 */
export async function main(ns) {
  const serverList = JSON.parse(ns.read("serverList.txt"));
  Object.keys(serverList).forEach((name) => ns.killall(name, true));
}
|
||||
4
local/path/home/newfile.js
Normal file
4
local/path/home/newfile.js
Normal file
@@ -0,0 +1,4 @@
|
||||
/**
 * Smoke-test script: prints a success marker to the terminal.
 * @param {NS} ns
 */
export async function main(ns) {
  ns.tprint("success");
}
|
||||
5
local/path/home/purchaseServers.js
Normal file
5
local/path/home/purchaseServers.js
Normal file
@@ -0,0 +1,5 @@
|
||||
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, purchaseAndUpgradeServers } from "/RamsesUtils.js";
|
||||
/**
 * Thin wrapper: delegates all work to purchaseAndUpgradeServers, imported
 * from /RamsesUtils.js at the top of this file.
 * @param {NS} ns
 */
export async function main(ns) {
  await purchaseAndUpgradeServers(ns);
}
|
||||
164
local/path/home/removefiles.js
Normal file
164
local/path/home/removefiles.js
Normal file
@@ -0,0 +1,164 @@
|
||||
/**
 * One-shot cleanup: deletes a fixed list of solved/stale coding-contract
 * files from the current server.
 * @param {NS} ns
 */
export async function main(ns) {
  const files = [
    "contract-115802.cct", "contract-121862.cct", "contract-124253.cct", "contract-130050.cct",
    "contract-132458.cct", "contract-133951.cct", "contract-137578.cct", "contract-140971.cct",
    "contract-141455.cct", "contract-143455.cct", "contract-160840.cct", "contract-16178.cct",
    "contract-166840.cct", "contract-171215.cct", "contract-173050.cct", "contract-17770.cct",
    "contract-18028.cct", "contract-183510.cct", "contract-195657.cct", "contract-202801.cct",
    "contract-204367.cct", "contract-217301.cct", "contract-221818.cct", "contract-230038.cct",
    "contract-236291.cct", "contract-241097.cct", "contract-242406.cct", "contract-253624.cct",
    "contract-25923.cct", "contract-265811.cct", "contract-267921.cct", "contract-275503.cct",
    "contract-278600.cct", "contract-279485.cct", "contract-280015.cct", "contract-280601.cct",
    "contract-286276.cct", "contract-286508.cct", "contract-298018.cct", "contract-30149.cct",
    "contract-302603.cct", "contract-305569.cct", "contract-322138.cct", "contract-323283.cct",
    "contract-328409.cct", "contract-328979.cct", "contract-334003.cct", "contract-36206.cct",
    "contract-375990.cct", "contract-376805.cct", "contract-410024.cct", "contract-413055.cct",
    "contract-423941.cct", "contract-427686.cct", "contract-441619.cct", "contract-446103.cct",
    "contract-448094.cct", "contract-467871.cct", "contract-480431.cct", "contract-505241.cct",
    "contract-516679.cct", "contract-519369.cct", "contract-529643.cct", "contract-535021.cct",
    "contract-535336.cct", "contract-547419.cct", "contract-560001.cct", "contract-564079.cct",
    "contract-570111.cct", "contract-570844.cct", "contract-573534.cct", "contract-576739.cct",
    "contract-580202.cct", "contract-584555.cct", "contract-586489.cct", "contract-592906.cct",
    "contract-599940.cct", "contract-600802.cct", "contract-603840.cct", "contract-605640.cct",
    "contract-6060.cct", "contract-606205.cct", "contract-610194.cct", "contract-619856.cct",
    "contract-631275.cct", "contract-6317.cct", "contract-653136.cct", "contract-655415.cct",
    "contract-658731.cct", "contract-662427.cct", "contract-663124.cct", "contract-663518.cct",
    "contract-669853.cct", "contract-671683.cct", "contract-676164.cct", "contract-677643.cct",
    "contract-681060.cct", "contract-683911.cct", "contract-685393.cct", "contract-695727.cct",
    "contract-696156.cct", "contract-703758.cct", "contract-720460.cct", "contract-722083.cct",
    "contract-727788.cct", "contract-735210.cct", "contract-736394.cct", "contract-736483.cct",
    "contract-748113.cct", "contract-751169.cct", "contract-752502.cct", "contract-765155.cct",
    "contract-772173.cct", "contract-773439.cct", "contract-77492.cct", "contract-778492.cct",
    "contract-784712.cct", "contract-785014.cct", "contract-786215.cct", "contract-789483.cct",
    "contract-7918.cct", "contract-796855.cct", "contract-800839.cct", "contract-801748.cct",
    "contract-81208.cct", "contract-817514.cct", "contract-82882.cct", "contract-843473.cct",
    "contract-843884.cct", "contract-847170.cct", "contract-847956.cct", "contract-848049.cct",
    "contract-856399.cct", "contract-862326.cct", "contract-866043.cct", "contract-866539.cct",
    "contract-870914.cct", "contract-887241.cct", "contract-893688.cct", "contract-89945.cct",
    "contract-900580.cct", "contract-915646.cct", "contract-918325.cct", "contract-9193.cct",
    "contract-921551.cct", "contract-942582.cct", "contract-945836.cct", "contract-947944.cct",
    "contract-954121.cct", "contract-957901.cct", "contract-960362.cct", "contract-963099.cct",
    "contract-965221.cct", "contract-979556.cct", "contract-985969.cct", "contract-992733.cct",
    "contract-996245.cct", "contract-997464.cct",
  ];
  files.forEach((file) => ns.rm(file));
}
|
||||
1
local/path/home/serverList.txt
Normal file
1
local/path/home/serverList.txt
Normal file
File diff suppressed because one or more lines are too long
24
local/path/home/tGrow.js
Normal file
24
local/path/home/tGrow.js
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
Workers are mostly the same, aside from uncommented portions allowing the hack and grow workers to report.
|
||||
I've also generally commented out the terminal logging, as it gets rather laggy when there's a lot of scripts
|
||||
writing to terminal.
|
||||
*/
|
||||
|
||||
/**
 * Grow worker. Receives its job description as a JSON string in args[0],
 * pads its start with additionalMsec so the grow lands at job.end, and
 * reports lateness (or 0) back to the controller on this script's pid port.
 * @param {NS} ns
 */
export async function main(ns) {
  const job = JSON.parse(ns.args[0]);

  // Remaining slack before this grow must start; negative means we're late.
  let delay = job.end - job.time - Date.now();
  const isLate = delay < 0;
  ns.writePort(ns.pid, isLate ? -delay : 0);
  if (isLate) {
    ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
    delay = 0;
  }
  await ns.grow(job.target, { additionalMsec: delay });
  const end = Date.now();

  // Tell the controller this worker finished (only when asked to report).
  ns.atExit(() => {
    if (job.report) ns.writePort(job.port, job.type + job.server);
    // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
  });
}
|
||||
24
local/path/home/tHack.js
Normal file
24
local/path/home/tHack.js
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
Workers are mostly the same, aside from uncommented portions allowing the hack and grow workers to report.
|
||||
I've also generally commented out the terminal logging, as it gets rather laggy when there's a lot of scripts
|
||||
writing to terminal.
|
||||
*/
|
||||
|
||||
/**
 * Hack worker. Receives its job description as a JSON string in args[0],
 * pads its start with additionalMsec so the hack lands at job.end, and
 * reports lateness (or 0) back to the controller on this script's pid port.
 * @param {NS} ns
 */
export async function main(ns) {
  const job = JSON.parse(ns.args[0]);

  // Remaining slack before this hack must start; negative means we're late.
  let delay = job.end - job.time - Date.now();
  const isLate = delay < 0;
  ns.writePort(ns.pid, isLate ? -delay : 0);
  if (isLate) {
    ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
    delay = 0;
  }
  await ns.hack(job.target, { additionalMsec: delay });
  const end = Date.now();

  // Tell the controller this worker finished (only when asked to report).
  ns.atExit(() => {
    if (job.report) ns.writePort(job.port, job.type + job.server);
    // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
  });
}
|
||||
28
local/path/home/tWeaken.js
Normal file
28
local/path/home/tWeaken.js
Normal file
@@ -0,0 +1,28 @@
|
||||
/*
|
||||
Workers are mostly the same, aside from uncommented portions allowing the hack and grow workers to report.
|
||||
I've also generally commented out the terminal logging, as it gets rather laggy when there's a lot of scripts
|
||||
writing to terminal.
|
||||
*/
|
||||
|
||||
/**
 * Weaken worker. Receives its job description as a JSON string in args[0],
 * pads its start with additionalMsec so the weaken lands at job.end, and
 * reports lateness back to the controller so it can shift the other jobs.
 * @param {NS} ns
 */
export async function main(ns) {
  const job = JSON.parse(ns.args[0]);

  // Remaining slack before this weaken must start; negative means we're late.
  let delay = job.end - job.time - Date.now();
  const isLate = delay < 0;
  // The controller reads this port to adjust the rest of the batch.
  ns.writePort(ns.pid, isLate ? -delay : 0);
  if (isLate) {
    ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
    delay = 0;
  }
  await ns.weaken(job.target, { additionalMsec: delay });
  const end = Date.now();

  // Write back to let the controller know that we're done.
  ns.atExit(() => {
    if (job.report) ns.writePort(job.port, job.type + job.server);
    // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
  });
}
|
||||
4
local/path/home/template.js
Normal file
4
local/path/home/template.js
Normal file
@@ -0,0 +1,4 @@
|
||||
/** Minimal template script: prints a greeting to the terminal. */
export async function main(ns) {
  ns.tprint("Hello World!");
}
|
||||
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIkM6L2dhbWVzL0JpdEJ1cm5lckdpdC92aXRlYnVybmVyLXRlbXBsYXRlL3NyYy90ZW1wbGF0ZS50cyJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgeyBOUyB9IGZyb20gJ0Bucyc7XHJcblxyXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gbWFpbihuczogTlMpIHtcclxuICBucy50cHJpbnQoJ0hlbGxvIFdvcmxkIScpO1xyXG59XHJcbiJdLCJtYXBwaW5ncyI6IkFBRUEsc0JBQXNCLEtBQUssSUFBUTtBQUNqQyxLQUFHLE9BQU8sY0FBYztBQUMxQjsiLCJuYW1lcyI6W119
|
||||
161
local/path/home/test.js
Normal file
161
local/path/home/test.js
Normal file
@@ -0,0 +1,161 @@
|
||||
/**
 * Creates a dummy coding contract, dispatches it to the matching solver
 * defined below in this file, and submits the answer.
 * @param {NS} ns
 */
export async function main(ns) {
  ns.tprint(ns.codingcontract.getContractTypes());
  let testcontract = ns.codingcontract.createDummyContract("Total Ways to Sum");
  let contractType = ns.codingcontract.getContractType(testcontract);
  ns.tprint(ns.codingcontract.getDescription(testcontract));
  let n = ns.codingcontract.getData(testcontract);
  ns.tprint("Data: " + n);
  let answer = "";
  if (contractType === "Find Largest Prime Factor") {
    // BUG FIX: was `largestPrimeFactor(n)`, which is not defined anywhere —
    // the solver below is named FindLargestPrimeFactor, so this branch
    // previously threw a ReferenceError.
    answer = FindLargestPrimeFactor(n);
  }
  if (contractType === "Subarray with Maximum Sum") {
    answer = SubarrayWithMaximumSum(ns, n);
  }
  if (contractType === "Total Ways to Sum") {
    answer = TotalWaysToSum(ns, n);
  }

  ns.tprint(answer);

  ns.tprint(ns.codingcontract.attempt(answer, testcontract));
}
|
||||
/*
|
||||
5:
|
||||
4 1
|
||||
3 2
|
||||
3 1 1
|
||||
2 2 1
|
||||
2 1 1 1
|
||||
1 1 1 1 1
|
||||
|
||||
6:
|
||||
5 1
|
||||
4 2
|
||||
4 1 1
|
||||
3 3
|
||||
3 2 1
|
||||
3 1 1 1
|
||||
2 2 2
|
||||
2 2 1 1
|
||||
2 1 1 1 1
|
||||
1 1 1 1 1 1
|
||||
|
||||
# Start with one position m, filling it with each integer between 1 and target
|
||||
# For each m, fill the next position n with integers between 1 and m
|
||||
# Repeat as long as the sum is smaller than target.
|
||||
# append all iterations to the Array and count
|
||||
*/
|
||||
|
||||
/**
 * "Total Ways to Sum" solver: counts the ways to write `target` as a sum of
 * at least two positive integers.
 *
 * Builds non-increasing partial partitions breadth-first: every partial is
 * extended with parts no larger than its last part until it reaches target.
 * @param {number} target - the integer to partition (target >= 1).
 * @returns {number} the number of partitions with two or more parts.
 */
function TotalWaysToSum(ns, target) {
  const completed = [];
  // Seed with every possible largest part. `target` itself is excluded
  // because a single-part "sum" does not count.
  let frontier = [];
  for (let i = 1; i < target; i++) {
    frontier.push([i]);
  }
  while (frontier.length > 0) {
    // BUG FIX: the work list must be a fresh array every pass. Previously
    // `unfinishedArray` was aliased to `inputArray` and never cleared, so
    // finished partitions were rescanned forever and the loop never ended.
    const next = [];
    for (const partial of frontier) {
      const sum = partial.reduce((a, b) => a + b, 0);
      if (sum === target) {
        completed.push(partial);
      } else {
        // Extend only with parts <= the last part (keeps each partition in
        // one canonical order) that do not overshoot the target.
        for (let k = 1; k <= partial[partial.length - 1] && k <= target - sum; k++) {
          next.push(partial.concat([k]));
        }
      }
    }
    frontier = next;
  }
  ns.tprint("Target: " + target);
  ns.tprint("Length: " + completed.length);
  return completed.length;
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
 * "Subarray with Maximum Sum" solver: returns the largest sum of any
 * contiguous, non-empty subarray (Kadane's algorithm, O(n)).
 *
 * Fixes in this revision:
 *  - replaced `eval(slice.join('+'))` with direct arithmetic (no eval);
 *  - the O(n^3) slice-and-sum scan is now a single pass;
 *  - the old -10000 sentinel returned wrong answers whenever every subarray
 *    summed below -10000; -Infinity removes that floor.
 * @param {NS} ns - unused, kept for signature compatibility with main().
 * @param {number[]} givenArray - the contract's input array.
 * @returns {number} the maximum subarray sum (-Infinity for an empty array).
 */
function SubarrayWithMaximumSum(ns, givenArray) {
  let maxSum = -Infinity;
  let runningSum = 0;
  for (const value of givenArray) {
    // Either extend the current subarray or start a new one at `value`.
    runningSum = Math.max(value, runningSum + value);
    if (runningSum > maxSum) maxSum = runningSum;
  }
  return maxSum;
}
|
||||
|
||||
|
||||
/**
 * Returns the largest prime factor of `number` via trial division: strip
 * factors from smallest to largest; whatever survives is prime.
 * @param {number} number - integer >= 2.
 * @returns {number} the largest prime factor of the input.
 */
function FindLargestPrimeFactor(number) {
  let remaining = number;
  let divisor = 2;
  while (divisor * divisor <= remaining) {
    if (remaining % divisor === 0) {
      // Divide out this factor; divisor may divide again, so don't advance.
      remaining /= divisor;
    } else {
      divisor += 1;
    }
  }
  return remaining;
}
|
||||
|
||||
/*
|
||||
function FindLargestPrimeFactor(n) {
|
||||
let x = Math.ceil(Math.random()*10);
|
||||
let y = x;
|
||||
let d = 1;
|
||||
|
||||
while (d === 1) {
|
||||
x = g(x, n);
|
||||
y = g(g(y, n), n)
|
||||
d = gcd(n, Math.abs(x - y))
|
||||
//ns.tprint("x:" + x + " y: " + y + " d: " + d)
|
||||
}
|
||||
if (d === n) {
|
||||
return ("failure")
|
||||
}
|
||||
else {
|
||||
return (d)
|
||||
}
|
||||
}
|
||||
|
||||
function g(x, n) {
|
||||
return (x * x) % n
|
||||
}
|
||||
|
||||
function gcd(a,b) {
|
||||
a = Math.abs(a);
|
||||
b = Math.abs(b);
|
||||
if (b > a) {var temp = a; a = b; b = temp;}
|
||||
while (true) {
|
||||
if (b == 0) return a;
|
||||
a %= b;
|
||||
if (a == 0) return b;
|
||||
b %= a;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function gcd(a, b) {
|
||||
if (!b) {
|
||||
return a;
|
||||
}
|
||||
return gcd(b, a % b);
|
||||
}
|
||||
*/
|
||||
218
local/path/home/utils.js
Normal file
218
local/path/home/utils.js
Normal file
@@ -0,0 +1,218 @@
|
||||
/*
|
||||
This file remains unchanged from the previous part, aside from updating the file paths.
|
||||
I didn't even bother removing the old comments.
|
||||
*/
|
||||
|
||||
/**
 * Library module — running it directly just prints a notice.
 * @param {NS} ns
 */
export async function main(ns) {
  ns.tprint("This is just a function library, it doesn't do anything.");
}
|
||||
|
||||
// Recursive DFS over the server graph starting at `hostname`. Servers that
// satisfy `lambdaCondition` are collected into `servers`; `visited` guards
// against revisiting nodes. The predicate can also carry side effects to
// perform other per-server tasks in the same sweep.
/** @param {NS} ns */
export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) {
  if (visited.includes(hostname)) return;
  visited.push(hostname);
  if (lambdaCondition(hostname)) {
    servers.push(hostname);
  }
  const neighbors = ns.scan(hostname);
  // Every scan result except home's lists the parent node first; drop it so
  // we don't immediately walk back up the tree.
  if (hostname !== "home") {
    neighbors.shift();
  }
  neighbors.forEach((node) => getServers(ns, lambdaCondition, node, servers, visited));
  return servers;
}
|
||||
|
||||
// Compares `server` against the current best `target` and returns whichever
// scores better as a hack target. The score balances max money against
// weaken time, and folds in hack chance when Formulas data is available
// (`forms` = true).
/** @param {NS} ns */
export function checkTarget(ns, server, target = "n00dles", forms = false) {
  if (!ns.hasRootAccess(server)) return target;

  const player = ns.getPlayer();
  const candidate = ns.getServer(server);
  const incumbent = ns.getServer(target);

  // Without Formulas only consider servers up to half our hacking skill,
  // since the simple score can't account for hack chance.
  const skillCap = player.skills.hacking / (forms ? 1 : 2);
  if (candidate.requiredHackingSkill > skillCap) return target;

  let score;
  if (forms) {
    // Score both servers as if fully weakened.
    candidate.hackDifficulty = candidate.minDifficulty;
    incumbent.hackDifficulty = incumbent.minDifficulty;
    score = (sim) =>
      (sim.moneyMax / ns.formulas.hacking.weakenTime(sim, player)) *
      ns.formulas.hacking.hackChance(sim, player);
  } else {
    score = (sim) => sim.moneyMax / sim.minDifficulty / ns.getWeakenTime(sim.hostname);
  }
  return score(candidate) > score(incumbent) ? server : target;
}
|
||||
|
||||
// Copies each script in `scripts` to `server`. Files that already exist on
// the target are skipped unless `overwrite` is set; nothing is copied
// without root access.
/** @param {NS} ns */
export function copyScripts(ns, server, scripts, overwrite = false) {
  if (!ns.hasRootAccess(server)) return;
  for (const script of scripts) {
    if (overwrite || !ns.fileExists(script, server)) {
      ns.scp(script, server);
    }
  }
}
|
||||
|
||||
// True when `server` is ready for batching: money at the maximum and
// security within a small tolerance of the minimum.
export function isPrepped(ns, server) {
  const tolerance = 0.0001;
  const moneyFull = ns.getServerMoneyAvailable(server) === ns.getServerMaxMoney(server);
  const secDelta = ns.getServerSecurityLevel(server) - ns.getServerMinSecurityLevel(server);
  return moneyFull && Math.abs(secDelta) < tolerance;
}
|
||||
|
||||
/*
|
||||
This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it.
|
||||
I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway.
|
||||
The prep strategy uses a modified proto-batching technique, which will be covered in part 2.
|
||||
*/
|
||||
/**
 * Prepares `values.target` for batching: loops until the server is at max
 * money and min security (per isPrepped), each pass scheduling tWeaken/tGrow
 * workers across the network RAM described by `ramNet`.
 *
 * Uses a modified proto-batching strategy with three per-pass modes:
 * security-only (0), money-only (1), or one-shot (2).
 *
 * @param {NS} ns
 * @param {*} values - target snapshot/tuning object; this function reads
 *   target, maxMoney, minSec, money, sec, spacer and log (shape declared
 *   elsewhere in the project — confirm against the caller).
 * @param {*} ramNet - RAM manager; must provide cloneBlocks(), maxBlockSize
 *   and prepThreads (declared elsewhere in the project).
 * @returns {Promise<true>} resolves once the target is prepped.
 * @throws {Error} when a worker script cannot be exec'd on its block.
 */
export async function prep(ns, values, ramNet) {
  const maxMoney = values.maxMoney;
  const minSec = values.minSec;
  let money = values.money;
  let sec = values.sec;
  while (!isPrepped(ns, values.target)) {
    const wTime = ns.getWeakenTime(values.target);
    // NOTE(review): assumes grow time is 80% of weaken time — confirm this
    // matches the game's current timing ratios.
    const gTime = wTime * 0.8;
    // Workers report back on a port keyed by this script's pid.
    const dataPort = ns.getPortHandle(ns.pid);
    dataPort.clear();

    // 1.75 is presumably the GB cost of the worker scripts — verify if the
    // workers change.
    const pRam = ramNet.cloneBlocks();
    const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75);
    const totalThreads = ramNet.prepThreads;
    let wThreads1 = 0;
    let wThreads2 = 0;
    let gThreads = 0;
    let batchCount = 1;
    let script, mode;
    /*
    Modes:
    0: Security only
    1: Money only
    2: One shot
    */

    if (money < maxMoney) {
      gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money));
      // 0.05 security removed per weaken thread, hence the division.
      wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05);
    }
    if (sec > minSec) {
      // Same 0.05/thread constant as above, expressed as a factor of 20.
      wThreads1 = Math.ceil((sec - minSec) * 20);
      // If everything doesn't fit at once, fall back to weakening only,
      // possibly over several passes (mode 0).
      if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) {
        gThreads = 0;
        wThreads2 = 0;
        batchCount = Math.ceil(wThreads1 / totalThreads);
        if (batchCount > 1) wThreads1 = totalThreads;
        mode = 0;
      } else mode = 2;
    } else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) {
      // Security is fine but the grow doesn't fit: grow as much as possible
      // with a matching weaken, repeating over several passes (mode 1).
      // 12.5 grow threads per weaken thread (13.5 including the weaken).
      mode = 1;
      const oldG = gThreads;
      wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1);
      gThreads = Math.floor(wThreads2 * 12.5);
      batchCount = Math.ceil(oldG / gThreads);
    } else mode = 2;

    // Big buffer here, since all the previous calculations can take a while. One second should be more than enough.
    const wEnd1 = Date.now() + wTime + 1000;
    const gEnd = wEnd1 + values.spacer;
    const wEnd2 = gEnd + values.spacer;

    // "metrics" here is basically a mock Job object. Again, this is just an artifact of repurposed old code.
    const metrics = {
      batch: "prep",
      target: values.target,
      type: "none",
      time: 0,
      end: 0,
      port: ns.pid,
      log: values.log,
      report: false
    };

    // Actually assigning threads. We actually allow grow threads to be spread out in mode 1.
    // This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher.
    // We're not trying to grow a specific amount, we're trying to grow as much as possible.
    for (const block of pRam) {
      while (block.ram >= 1.75) {
        const bMax = Math.floor(block.ram / 1.75)
        let threads = 0;
        // Fill in priority order: first weaken, second weaken, then grow.
        // Only the final weaken of the pass sets report = true, which is
        // what the wait loop at the bottom listens for.
        if (wThreads1 > 0) {
          script = "tWeaken.js";
          metrics.type = "pWeaken1";
          metrics.time = wTime;
          metrics.end = wEnd1;
          threads = Math.min(wThreads1, bMax);
          if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true;
          wThreads1 -= threads;
        } else if (wThreads2 > 0) {
          script = "tWeaken.js";
          metrics.type = "pWeaken2";
          metrics.time = wTime;
          metrics.end = wEnd2;
          threads = Math.min(wThreads2, bMax);
          if (wThreads2 - threads === 0) metrics.report = true;
          wThreads2 -= threads;
        } else if (gThreads > 0 && mode === 1) {
          // Mode 1: grows may be split across blocks (see comment above).
          script = "tGrow.js";
          metrics.type = "pGrow";
          metrics.time = gTime;
          metrics.end = gEnd;
          threads = Math.min(gThreads, bMax);
          metrics.report = false;
          gThreads -= threads;
        } else if (gThreads > 0 && bMax >= gThreads) {
          // Otherwise the grow only launches if it fits whole in this block.
          script = "tGrow.js";
          metrics.type = "pGrow";
          metrics.time = gTime;
          metrics.end = gEnd;
          threads = gThreads;
          metrics.report = false;
          gThreads = 0;
        } else break;
        metrics.server = block.server;
        const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics));
        if (!pid) throw new Error("Unable to assign all jobs.");
        block.ram -= 1.75 * threads;
      }
    }

    // Fancy UI stuff to update you on progress.
    const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now();
    const timer = setInterval(() => {
      ns.clearLog();
      switch (mode) {
        case 0:
          ns.print(`Weakening security on ${values.target}...`);
          break;
        case 1:
          ns.print(`Maximizing money on ${values.target}...`);
          break;
        case 2:
          ns.print(`Finalizing preparation on ${values.target}...`);
      }
      ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`);
      ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`);
      const time = tEnd - Date.now();
      ns.print(`Estimated time remaining: ${ns.tFormat(time)}`);
      ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`);
    }, 200);
    // Ensure the UI timer dies with the script.
    ns.atExit(() => clearInterval(timer));

    // Wait for the last weaken to finish.
    do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken"));
    clearInterval(timer);
    await ns.sleep(100);

    // Refresh the snapshot before re-testing the loop condition.
    money = ns.getServerMoneyAvailable(values.target);
    sec = ns.getServerSecurityLevel(values.target);
  }
  return true;
}
|
||||
Reference in New Issue
Block a user