Ramses Script Cleanup

This commit is contained in:
Philipp
2024-10-05 19:26:43 +02:00
parent 53e9822d01
commit 2560c925d2
30 changed files with 0 additions and 1882 deletions

View File

@@ -1,6 +0,0 @@
/**
 * Permanently donates this host's CPU cycles via ns.share().
 * Runs until the script is killed.
 * @param {NS} ns
 */
export async function main(ns) {
    for (;;) {
        await ns.share();
    }
}

View File

@@ -1,16 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
let serverList = JSON.parse(ns.read("serverList.txt"));
for (const [name, entry] of Object.entries(serverList)) {
if (entry.rootAccess === true) {
ns.print("killed threads on: " + name + " " +ns.killall(name, true));
//move script and run
let maxRam = ns.getServerMaxRam(name);
if (maxRam > 0) {
ns.scp("FactionBoost.js", name, "home");
let maxProcesses = 1;
maxProcesses = Math.floor(maxRam / 4);
if (name === "home") maxProcesses = maxProcesses - 50;
ns.exec("FactionBoost.js", name, maxProcesses);
}}}}

View File

@@ -1,70 +0,0 @@
/**
 * Weakens the target server (args[0]) down to minimum security from "home",
 * splitting the work into RAM-sized chunks when it doesn't fit in one run,
 * then hands off to the RMcontroller.js batch controller.
 * @param {NS} ns
 */
export async function main(ns) {
    //args
    const sTarget = ns.args[0]; // target server
    // declare objects
    const oHome = ns.getServer("home");
    //declare variables
    const sWeakenScript = "RMweaken.js";
    const sBatch = "RMcontroller.js";
    const nCores = oHome.cpuCores;
    let nSecurity = ns.getServerSecurityLevel(sTarget);
    const nMinSecurity = ns.getServerMinSecurityLevel(sTarget);
    // Security removed by one weaken thread on this machine.
    const nWeakenSTR = ns.weakenAnalyze(1, nCores);
    let nThreads = Math.ceil((nSecurity - nMinSecurity) / nWeakenSTR);
    let nFreeRam = ns.getServerMaxRam("home") - ns.getServerUsedRam("home");
    ns.tail("RMbreach.js", "home", sTarget);
    if (nThreads > 0 && nSecurity > nMinSecurity) {
        const nDelay = ns.getWeakenTime(sTarget);
        const nScriptRAM = ns.getScriptRam(sWeakenScript, "home");
        const nRequiredRAM = nScriptRAM * nThreads;
        ns.tprint("current security is: " + nSecurity);
        ns.tprint("minimum security is: " + nMinSecurity);
        ns.tprint("threads needed for weaken: " + nThreads);
        ns.tprint(nThreads + " will reduce Security by " + ns.weakenAnalyze(nThreads, nCores));
        ns.tprint(nThreads + " of " + sWeakenScript + " requires " + nRequiredRAM + " GB of RAM");
        ns.tprint("weakening will take " + (nDelay / 1000 / 60) + " minutes");
        if (nFreeRam > nRequiredRAM) {
            // Everything fits: a single weaken run.
            ns.run(sWeakenScript, nThreads, sTarget);
            await ns.sleep(Math.ceil(nDelay));
            nSecurity = ns.getServerSecurityLevel(sTarget);
            ns.tprint("Breach complete, security level is now at: " + nSecurity);
        }
        else {
            ns.print("not enough RAM to run all threads at once, splitting into smaller chunks...");
            // Bug fix: the old chunking shrank nThreads by an arbitrary ratio,
            // reused a stale weaken duration, and never refreshed free RAM, so
            // a chunk could still exceed available memory. Each pass now runs
            // exactly as many threads as currently fit on home.
            while (nSecurity > nMinSecurity) {
                nFreeRam = ns.getServerMaxRam("home") - ns.getServerUsedRam("home");
                const nChunk = Math.min(nThreads, Math.floor(nFreeRam / nScriptRAM));
                if (nChunk < 1) {
                    // No room right now; wait for other scripts to free RAM.
                    await ns.sleep(1000);
                    continue;
                }
                ns.print("running chunk of " + nChunk + " threads");
                ns.run(sWeakenScript, nChunk, sTarget);
                // Re-read the weaken time: it shrinks as security drops.
                await ns.sleep(Math.ceil(ns.getWeakenTime(sTarget)));
                nSecurity = ns.getServerSecurityLevel(sTarget);
                nThreads = Math.ceil((nSecurity - nMinSecurity) / nWeakenSTR);
            }
        }
    }
    //run batch controller and pop its log window
    const nBatchPID = ns.run(sBatch, 1, sTarget);
    ns.tail(nBatchPID, "home", sBatch, 1, sTarget);
    ns.resizeTail(815, 395, nBatchPID);
    ns.moveTail(1925, 0, nBatchPID);
}

View File

@@ -1,126 +0,0 @@
/**
 * HWGW batch controller: endlessly launches hack/weaken/grow/weaken worker
 * waves from "home" against the server named in args[0].
 * NOTE(review): all timings, RAM figures and thread estimates are computed
 * once before the loop and never refreshed, so they go stale as weaken time
 * changes with hacking level — confirm this is intended.
 * @param {NS} ns
 */
export async function main(ns) {
    //Arguments
    const sTarget = ns.args[0]; // target server
    ns.tail("RMcontroller.js", "home", sTarget);
    //Settings
    const oHome = ns.getServer("home");
    const nCores = oHome.cpuCores;
    const sScript = ns.getScriptName();
    const sWeaken = "RMweaken.js";
    const sGrow = "RMgrow.js";
    const sHack = "RMhack.js";
    const nScriptRAM = ns.getScriptRam(sScript, "home");
    const nWeakenRAM = ns.getScriptRam(sWeaken, "home");
    const nGrowRAM = ns.getScriptRam(sGrow, "home");
    const nHackRAM = ns.getScriptRam(sHack, "home");
    const nHomeUsedRAM = ns.getServerUsedRam("home");
    const nHomeMaxRAM = ns.getServerMaxRam("home");
    let nHomeFreeRAM = nHomeMaxRAM - nHomeUsedRAM;
    // Millisecond offsets that stagger the landing of the four jobs of a batch.
    const nDelays = [0, 20, 40, 60];
    //abort script if sTarget is undefined
    if (sTarget === undefined) {
        ns.tprint("1st arg sTarget is undefined");
        return false;
    }
    //target server info
    const nMinSecurity = ns.getServerMinSecurityLevel(sTarget);
    const nMaxMoney = ns.getServerMaxMoney(sTarget);
    // Grow takes 80% and hack 25% of the weaken duration.
    let nWeakenTime1 = ns.getWeakenTime(sTarget);
    let nWeakenTime2 = nWeakenTime1;
    let nGrowTime = nWeakenTime1 * 0.8;
    let nHackTime = nWeakenTime1 / 4;
    //let nHackSecurityGain = ns.hackAnalyzeSecurity(1, sTarget);
    //let nHackSecurityGain = 0.002;
    //let nHackThreadsEstimate = Math.max(Math.floor(1 / nHackSecurityGain),1);
    //let nHackThreadsEstimate = 10;
    //ns.tprint("nHackSecurityGain = " + nHackSecurityGain);
    //ns.tprint("nHackThreadsEstimate = " + nHackThreadsEstimate);
    // One batch is hard-coded as 25 hack / 1 weaken / 13 grow / 1 weaken threads.
    const nHackTotalRAM = nHackRAM * 25;
    //let nGrowSecurityGain = ns.growthAnalyzeSecurity(1, sTarget, nCores);
    //let nGrowSecurityGain = 0.004;
    //let nGrowThreadsEstimate = Math.max(Math.floor(1 / nGrowSecurityGain),1);
    //ns.tprint("nGrowSecurityGain = " + nGrowSecurityGain);
    //ns.tprint("nGrowThreadsEstimate = " + nGrowThreadsEstimate);
    const nGrowTotalRAM = nGrowRAM * 13;
    //let nWeakenSecurity = ns.weakenAnalyze(1, nCores);
    //let nWeakenSecurity = 0.05;
    //let nWeakenThreadsEstimate = Math.max(Math.ceil(1 / nWeakenSecurity),1);
    //ns.tprint("nWeakenSecurity = " + nWeakenSecurity);
    //ns.tprint("nWeakenThreadsEstimate = " + nWeakenThreadsEstimate);
    const nWeakenTotalRAM = nWeakenRAM * 1;
    // RAM cost of one full batch (weaken runs twice per batch).
    const nTotalRAM = nHackTotalRAM + nGrowTotalRAM + (nWeakenTotalRAM * 2)
    // How many whole batches fit in home's free RAM after this controller's own cost.
    const nTotalBatches = Math.floor((nHomeFreeRAM - nScriptRAM) / nTotalRAM);
    let nHackThreadsEstimate = nTotalBatches * 25;
    let nWeakenThreadsEstimate1 = nTotalBatches * 1;
    let nGrowThreadsEstimate = nTotalBatches * 13;
    let nWeakenThreadsEstimate2 = nTotalBatches * 1;
    ns.tprint("RAM per Cycle = " + nTotalRAM);
    ns.tprint("how many batches can i run at the same time? = " + nTotalBatches);
    //await ns.grow(server, { additionalMsec: nMsecDelay });
    // Delays so every job finishes just after its paired weaken.
    let nGrowDelay = nWeakenTime1 - nGrowTime;
    let nHackDelay = nWeakenTime1 - nHackTime;
    const nCycleDuration = nWeakenTime2 + nDelays[3];
    ns.tprint("nCycleDuration = " + nCycleDuration);
    // NOTE(review): nBatchFrequency is Infinity when nTotalBatches is 0, and is
    // computed but never used below — confirm whether it was meant to pace execs.
    const nBatchFrequency = Math.ceil(nCycleDuration / nTotalBatches);
    ns.tprint("nBatchFrequency = " + nBatchFrequency);
    while (true) {
        //server stats
        let nCurrentSecurity = ns.getServerSecurityLevel(sTarget);
        let nCurrentMoney = ns.getServerMoneyAvailable(sTarget);
        //timestamp
        let currentDate = new Date();
        let nOffset;
        ns.print("Cash: " + (Math.floor(nCurrentMoney * 1000) / 1000) + " / " + nMaxMoney);
        ns.print("Security: " + (Math.floor(nCurrentSecurity * 1000) / 1000) + " / " + nMinSecurity);
        //Calculate estimate time of completion
        nOffset = ns.getWeakenTime(sTarget);
        // Full wave duration plus a 1s safety buffer.
        let nSafeTime = nOffset + nDelays[3]+1000;
        let nWeakTime = new Date(currentDate.getTime() + nSafeTime);
        let sWeakTime = nWeakTime.toLocaleTimeString('sw-SV'); //swedish time
        //Print estimated time of completion
        ns.print("Weakening " + sTarget + " Estimated complete at " + sWeakTime);
        //hack
        const nHackPID = ns.exec(sHack, "home", nHackThreadsEstimate, sTarget, false, nHackDelay + nDelays[0]);
        //ns.tail(nHackPID, "home", "home", nHackThreadsEstimate, sTarget, 0, nHackDelay + nDelays[0]);
        //weaken 1
        const nWeakenPID = ns.exec(sWeaken, "home", nWeakenThreadsEstimate1, sTarget, false, nDelays[1]);
        //ns.tail(nWeakenPID, "home", "home", nWeakenThreadsEstimate, sTarget, 0, nDelays[1]);
        //grow
        const nGrowPID = ns.exec(sGrow, "home", nGrowThreadsEstimate, sTarget, false, nGrowDelay + nDelays[2]);
        //ns.tail(nGrowPID, "home", "home", nGrowThreadsEstimate, sTarget, 0, nGrowDelay + nDelays[2]);
        //weaken 2
        const nWeakenPID2 = ns.exec(sWeaken, "home", nWeakenThreadsEstimate2, sTarget, false, nDelays[3]);
        //ns.tail(nWeakenPID2, "home", "home", nWeakenThreadsEstimate, sTarget, 0, nDelays[3]);
        // Wait for the whole wave to land before launching the next one.
        await ns.sleep(nSafeTime);
    }
}

View File

@@ -1,14 +0,0 @@
/**
 * Grow worker: grows args[0] with an extra delay of args[2] ms.
 * Loops forever when args[1] is strictly true, otherwise runs once.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.print(Date.now());
    const [sTarget, bRepeat, nMsecDelay] = ns.args;
    const growOnce = () => ns.grow(sTarget, { additionalMsec: nMsecDelay });
    // When the flag is strictly true this never falls through to the code below.
    while (bRepeat === true) {
        await growOnce();
    }
    await growOnce();
    ns.print(Date.now());
}

View File

@@ -1,14 +0,0 @@
/**
 * Hack worker: hacks args[0] with an extra delay of args[2] ms.
 * Loops forever when args[1] is strictly true, otherwise runs once.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.print(Date.now());
    const [sTarget, bRepeat, nMsecDelay] = ns.args;
    const hackOnce = () => ns.hack(sTarget, { additionalMsec: nMsecDelay });
    // When the flag is strictly true this never falls through to the code below.
    while (bRepeat === true) {
        await hackOnce();
    }
    await hackOnce();
    ns.print(Date.now());
}

View File

@@ -1,14 +0,0 @@
/**
 * Weaken worker: weakens args[0] with an extra delay of args[2] ms.
 * Loops forever when args[1] is strictly true, otherwise runs once.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.print(Date.now());
    const [sTarget, bRepeat, nMsecDelay] = ns.args;
    const weakenOnce = () => ns.weaken(sTarget, { additionalMsec: nMsecDelay });
    // When the flag is strictly true this never falls through to the code below.
    while (bRepeat === true) {
        await weakenOnce();
    }
    await weakenOnce();
    ns.print(Date.now());
}

View File

@@ -1,5 +0,0 @@
/**
 * Single grow cycle against the server named in args[0].
 * @param {NS} ns
 */
export async function main(ns) {
    await ns.grow(ns.args[0]);
}

View File

@@ -1,5 +0,0 @@
/**
 * Single hack cycle against the server named in args[0].
 * @param {NS} ns
 */
export async function main(ns) {
    await ns.hack(ns.args[0]);
}

View File

@@ -1,5 +0,0 @@
/**
 * Single weaken cycle against the server named in args[0].
 * @param {NS} ns
 */
export async function main(ns) {
    await ns.weaken(ns.args[0]);
}

View File

@@ -1,297 +0,0 @@
/*
Welcome to part 2. I'll only be commenting on things that have changed from the previous part, so if there's something
confusing, be sure to go back and look at part 1 for more detailed explanations.
For part 2, we'll be making a protobatcher. Essentially that means we'll be running our previous version in a constant loop.
To facilitate this, and because otherwise there wouldn't really be much to this part, we're going to refine the way our
scripts communicate with each other using ports.
*/
import { getServers, copyScripts, checkTarget, isPrepped, prep } from "/S2utils.js";
// Job types in batch order; each later job is offset to land after the previous one.
const TYPES = ["hack", "weaken1", "grow", "weaken2"];
// Worker scripts that must be copied to every host jobs can run on.
const WORKERS = ["S2tHack.js", "S2tWeaken.js", "S2tGrow.js"];
// Maps each job type to the worker script that executes it.
const SCRIPTS = { hack: "S2tHack.js", weaken1: "S2tWeaken.js", grow: "S2tGrow.js", weaken2: "S2tWeaken.js" };
// Per-thread RAM cost (GB) of each worker script.
const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 };
// Spacer multiples used to stagger each job type's end time.
const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 };
/*
Most of the changes are in the main function, so I've moved it up top. I generally prefer having the main function at the
top of the file anyway.
*/
/**
 * Protobatcher controller: picks the best rooted target, preps it, then runs
 * one optimized HWGW batch at a time, using ports to synchronize with workers.
 * @param {NS} ns
 */
export async function main(ns) {
    await ns.sleep(500);
    // Moving most of our active feedback to the tail window so that batches finishing don't get swept away.
    ns.disableLog("ALL");
    ns.tail();
    // Stick the whole script in a loop. That's it, see you in part 3.
    // Just kidding, there's a bit more to it.
    let batchCount = 0;
    while (true) {
        // Register a port using the script's unique handle.
        // I like to keep ports strictly coupled to a specific script, but you can use whatever number you like.
        const dataPort = ns.getPortHandle(ns.pid);
        dataPort.clear() // Make sure there's no random data left in the port.
        let target = "n00dles";
        // Walk the network once: pick the best target, push workers everywhere, keep rooted hosts.
        const servers = getServers(ns, (server) => {
            // Don't worry if you don't have Formulas, it's not needed at all here.
            target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home"));
            copyScripts(ns, server, WORKERS, true);
            return ns.hasRootAccess(server);
        });
        //target = "n00dles";
        const ramNet = new RamNet(ns, servers);
        const metrics = new Metrics(ns, target);
        if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet);
        optimizeBatch(ns, metrics, ramNet); // The same optimization algorithm works just fine for protobatching.
        metrics.calculate(ns);
        const batch = [];
        batchCount++;
        for (const type of TYPES) {
            // We've removed the buffer. You'll see why later.
            metrics.ends[type] = Date.now() + metrics.wTime + metrics.spacer * OFFSETS[type];
            const job = new Job(type, metrics);
            job.batch = batchCount; // This is a bit of a hack. We'll do it better in the next part.
            if (!ramNet.assign(job)) {
                ns.print(`ERROR: Unable to assign ${type}. Dumping debug info:`);
                ns.print(job);
                ns.print(metrics);
                ramNet.printBlocks(ns);
                return;
            }
            batch.push(job);
        }
        // We do a bit more during deployment now.
        for (const job of batch) {
            job.end += metrics.delay;
            const jobPid = ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job));
            if (!jobPid) throw new Error(`Unable to deploy ${job.type}`); // If the exec fails for any reason, error out.
            /*
            If a worker deploys late, it will communicate back how late it was, so that the other scripts can adjust.
            Note that for this we use the *worker's* port instead of our controller's port. It's good practice to make
            sure your ports have a very narrow focus.
            */
            const tPort = ns.getPortHandle(jobPid);
            await tPort.nextWrite();
            metrics.delay += tPort.read();
        }
        // Live status line in the tail window while the batch is in flight.
        const timer = setInterval(() => {
            ns.clearLog();
            ns.print(`Hacking \$${ns.formatNumber(metrics.maxMoney * metrics.greed)} from ${metrics.target}`)
            ns.print(`Running batch: ETA ${ns.tFormat(metrics.ends.weaken2 - Date.now())}`);
        }, 1000);
        ns.atExit(() => {
            clearInterval(timer);
        });
        // Wait for the weaken2 worker to report back. For now I've just hardcoded the Job class to tell only
        // weaken2 to report. This behavior will change later.
        await dataPort.nextWrite();
        dataPort.clear(); // For now we don't actually need the information here, we're just using it for timing.
        clearInterval(timer);
    }
}
class Job {
    /**
     * One scheduled worker task (hack/weaken1/grow/weaken2).
     * @param {string} type - job type; key into the metrics tables.
     * @param {Metrics} metrics - precomputed timing/thread data for the target.
     * @param {string} [server] - host the job is assigned to ("none" until assigned).
     */
    constructor(type, metrics, server = "none") {
        this.type = type;
        this.target = metrics.target;
        this.server = server;
        // Timing and sizing come straight from the metrics tables.
        this.end = metrics.ends[type];
        this.time = metrics.times[type];
        this.threads = metrics.threads[type];
        // RAM cost scales linearly with the thread count.
        this.cost = COSTS[type] * this.threads;
        // For now, only w2 jobs report back to the controller.
        this.report = type === "weaken2";
        this.port = metrics.port; // This lets the workers know which port to write to.
        this.batch = 0; // We'll keep track of how many we've run, just because we can.
    }
}
/** @param {NS} ns */
class Metrics {
    /**
     * Snapshot of everything the controller needs to know about a target.
     * @param {NS} ns
     * @param {string} server - hostname of the hack target.
     */
    constructor(ns, server) {
        this.target = server;
        this.maxMoney = ns.getServerMaxMoney(server);
        this.money = Math.max(ns.getServerMoneyAvailable(server), 1);
        this.minSec = ns.getServerMinSecurityLevel(server);
        this.sec = ns.getServerSecurityLevel(server);
        this.prepped = isPrepped(ns, server);
        this.chance = 0;
        this.wTime = 0;
        this.delay = 0; // Cumulative delays caused by late jobs.
        this.spacer = 5; // ms between consecutive job landings.
        this.greed = 0.1; // Fraction of max money stolen per batch.
        this.depth = 0; // Theoretical concurrent batch count (not used yet).
        this.times = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
        this.ends = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
        this.threads = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
        this.port = ns.pid; // Workers report back on the controller's pid port.
    }
    /**
     * Refreshes timings and thread counts for one batch at the given greed level.
     * @param {NS} ns
     * @param {number} [greed] - fraction of max money to steal.
     */
    calculate(ns, greed = this.greed) {
        const target = this.target;
        const maxMoney = this.maxMoney;
        this.money = ns.getServerMoneyAvailable(target);
        this.sec = ns.getServerSecurityLevel(target);
        const weakenTime = ns.getWeakenTime(target);
        this.wTime = weakenTime;
        // Hack and grow durations are fixed fractions of the weaken duration.
        this.times.weaken1 = weakenTime;
        this.times.weaken2 = weakenTime;
        this.times.hack = weakenTime / 4;
        this.times.grow = weakenTime * 0.8;
        this.depth = weakenTime / this.spacer * 4;
        const hackFraction = ns.hackAnalyze(target);
        const hackThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(target, maxMoney * greed)), 1);
        // Actual fraction stolen after rounding the thread count.
        const trueGreed = hackFraction * hackThreads;
        const growThreads = Math.ceil(ns.growthAnalyze(target, maxMoney / (maxMoney - maxMoney * trueGreed)));
        // 0.002/0.004 security per hack/grow thread; 0.05 removed per weaken thread.
        this.threads.weaken1 = Math.max(Math.ceil(hackThreads * 0.002 / 0.05), 1);
        this.threads.weaken2 = Math.max(Math.ceil(growThreads * 0.004 / 0.05), 1);
        this.threads.hack = hackThreads;
        this.threads.grow = growThreads;
        this.chance = ns.hackAnalyzeChance(target);
    }
}
/** @param {NS} ns */
class RamNet {
    // Free-RAM blocks, sorted smallest-first with "home" forced to the end.
    #blocks = [];
    #minBlockSize = Infinity;
    #maxBlockSize = 0;
    #totalRam = 0;
    #maxRam = 0;
    #prepThreads = 0;
    #index = new Map();
    /**
     * Builds the RAM map from every rooted server with at least 1.6GB free.
     * @param {NS} ns
     * @param {string[]} servers - hostnames to consider.
     */
    constructor(ns, servers) {
        for (const host of servers) {
            if (!ns.hasRootAccess(host)) continue;
            const capacity = ns.getServerMaxRam(host);
            const free = capacity - ns.getServerUsedRam(host);
            // Skip hosts that can't fit even the cheapest worker (1.6GB).
            if (free < 1.60) continue;
            this.#blocks.push({ server: host, ram: free });
            this.#minBlockSize = Math.min(this.#minBlockSize, free);
            this.#maxBlockSize = Math.max(this.#maxBlockSize, free);
            this.#totalRam += free;
            this.#maxRam += capacity;
            this.#prepThreads += Math.floor(free / 1.75);
        }
        this.#sort();
        // Fast hostname -> block-position lookup.
        this.#blocks.forEach((block, position) => this.#index.set(block.server, position));
    }
    // Smallest blocks first so cheap jobs don't fragment big hosts; home always last.
    #sort() {
        this.#blocks.sort((a, b) => {
            if (a.server === "home") return 1;
            if (b.server === "home") return -1;
            return a.ram - b.ram;
        });
    }
    /** Returns the block for `server`; throws if the host isn't tracked. */
    getBlock(server) {
        const position = this.#index.get(server);
        if (position === undefined) {
            throw new Error(`Server ${server} not found in RamNet.`);
        }
        return this.#blocks[position];
    }
    /** Remaining unreserved RAM across all blocks. */
    get totalRam() {
        return this.#totalRam;
    }
    /** Total RAM capacity of all tracked hosts. */
    get maxRam() {
        return this.#maxRam;
    }
    /** Free RAM of the single largest block. */
    get maxBlockSize() {
        return this.#maxBlockSize;
    }
    /** How many 1.75GB worker threads the whole network can hold. */
    get prepThreads() {
        return this.#prepThreads;
    }
    /** Reserves RAM for `job` on the first block that fits; sets job.server. */
    assign(job) {
        const slot = this.#blocks.find((block) => block.ram >= job.cost);
        if (slot === undefined) return false;
        job.server = slot.server;
        slot.ram -= job.cost;
        this.#totalRam -= job.cost;
        return true;
    }
    /** Releases the RAM a finished job had reserved. */
    finish(job) {
        this.getBlock(job.server).ram += job.cost;
        this.#totalRam += job.cost;
    }
    /** Shallow copies of the blocks, safe to mutate for planning. */
    cloneBlocks() {
        return this.#blocks.map((block) => ({ ...block }));
    }
    /** Dumps every block to the script log (debugging aid). */
    printBlocks(ns) {
        this.#blocks.forEach((block) => ns.print(block));
    }
}
/**
 * Finds the highest greed level (fraction of max money per batch) whose full
 * hack/weaken/grow/weaken thread set fits into the available RAM blocks, then
 * records the result in metrics.greed and metrics.threads.
 * @param {NS} ns
 * @param {Metrics} metrics
 * @param {RamNet} ramNet
 * @returns {boolean} true once a fitting batch is found.
 * @throws when not even the smallest batch fits in RAM.
 */
export function optimizeBatch(ns, metrics, ramNet) {
    const threadCap = ramNet.maxBlockSize / 1.75;
    const maxMoney = metrics.maxMoney;
    const hackFraction = ns.hackAnalyze(metrics.target);
    const minGreed = 0.001;
    // Step down from 99% greed in 0.1% increments until a batch fits.
    for (let greed = 0.99; greed > minGreed; greed -= 0.001) {
        const hackThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(metrics.target, maxMoney * greed)), 1);
        // Actual fraction stolen after rounding the hack thread count.
        const trueGreed = hackFraction * hackThreads;
        const growThreads = Math.ceil(ns.growthAnalyze(metrics.target, maxMoney / (maxMoney - maxMoney * trueGreed)));
        // Each of hack/grow must fit on a single block.
        if (Math.max(hackThreads, growThreads) > threadCap) continue;
        const weakenThreads1 = Math.max(Math.ceil(hackThreads * 0.002 / 0.05), 1);
        const weakenThreads2 = Math.max(Math.ceil(growThreads * 0.004 / 0.05), 1);
        const costs = [hackThreads * 1.7, weakenThreads1 * 1.75, growThreads * 1.75, weakenThreads2 * 1.75];
        // Simulate greedy placement against a scratch copy of the RAM map.
        const scratch = ramNet.cloneBlocks();
        const fits = costs.every((cost) => {
            const block = scratch.find((b) => b.ram >= cost);
            if (block === undefined) return false;
            block.ram -= cost;
            return true;
        });
        if (fits) {
            metrics.greed = greed;
            metrics.threads = { hack: hackThreads, weaken1: weakenThreads1, grow: growThreads, weaken2: weakenThreads2 };
            return true;
        }
    }
    throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong.");
}

View File

@@ -1,23 +0,0 @@
/*
Batch grow worker. Reports its own lateness back on its pid port so the
controller can compensate, then performs one timed grow.
*/
/** @param {NS} ns */
export async function main(ns) {
    const job = JSON.parse(ns.args[0]);
    // Stall so the grow lands exactly at job.end.
    let delay = job.end - job.time - Date.now();
    if (delay >= 0) {
        ns.writePort(ns.pid, 0);
    } else {
        ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
        ns.writePort(ns.pid, -delay);
        delay = 0;
    }
    await ns.grow(job.target, { additionalMsec: delay });
    const end = Date.now();
    ns.atExit(() => {
        // if (job.report) ns.writePort(job.port, job.type + job.server);
        ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
    });
}

View File

@@ -1,23 +0,0 @@
/*
Batch hack worker. Reports its own lateness back on its pid port so the
controller can compensate, then performs one timed hack.
*/
/** @param {NS} ns */
export async function main(ns) {
    const job = JSON.parse(ns.args[0]);
    // Stall so the hack lands exactly at job.end.
    let delay = job.end - job.time - Date.now();
    if (delay >= 0) {
        ns.writePort(ns.pid, 0);
    } else {
        ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
        ns.writePort(ns.pid, -delay);
        delay = 0;
    }
    await ns.hack(job.target, { additionalMsec: delay });
    const end = Date.now();
    ns.atExit(() => {
        // if (job.report) ns.writePort(job.port, job.type + job.server);
        ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
    });
}

View File

@@ -1,27 +0,0 @@
/*
Batch weaken worker. Reports its own lateness back on its pid port so the
controller can compensate, performs one timed weaken, and (when job.report is
set) tells the controller on job.port that it finished.
*/
/** @param {NS} ns */
export async function main(ns) {
    const job = JSON.parse(ns.args[0]);
    // Stall so the weaken lands exactly at job.end.
    let delay = job.end - job.time - Date.now();
    if (delay >= 0) {
        ns.writePort(ns.pid, 0);
    } else {
        // Tell the controller how late we were so it can adjust the other jobs.
        ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
        ns.writePort(ns.pid, -delay);
        delay = 0;
    }
    await ns.weaken(job.target, { additionalMsec: delay });
    const end = Date.now();
    // Let the controller know we're done; the payload is used by the prep function.
    ns.atExit(() => {
        if (job.report) ns.writePort(job.port, job.type + job.server);
        ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
    });
}

View File

@@ -1,218 +0,0 @@
/*
The utility function library. This is almost entirely unchanged from part 1, aside from the prep function
printing to the log console instead of the terminal.
*/
/**
 * Library entry point: warns anyone who runs this file directly.
 * @param {NS} ns
 */
export async function main(ns) {
    ns.tprint("This is just a function library, it doesn't do anything.");
}
// Recursive depth-first walk of the network. The lambda predicate decides which
// hosts land in the returned list, and can also carry side effects that should
// run once per server (target scoring, script copying, ...).
/** @param {NS} ns */
export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) {
    if (visited.includes(hostname)) return;
    visited.push(hostname);
    if (lambdaCondition(hostname)) servers.push(hostname);
    const neighbors = ns.scan(hostname);
    // For non-home hosts the first scan entry is the node we came from; drop it.
    if (hostname !== "home") neighbors.shift();
    neighbors.forEach((node) => getServers(ns, lambdaCondition, node, servers, visited));
    return servers;
}
// Target scorer for the getServers walk. Compares `server` against the current
// `target` and returns whichever is the better hack candidate, balancing
// expected return against time taken. With Formulas available the score uses
// weaken time and hack chance at minimum difficulty; without it, money/minSec.
/** @param {NS} ns */
export function checkTarget(ns, server, target = "n00dles", forms = false) {
    if (!ns.hasRootAccess(server)) return target;
    const player = ns.getPlayer();
    const candidate = ns.getServer(server);
    const current = ns.getServer(target);
    // Without Formulas, only consider servers at half our hacking skill or below.
    const skillCap = player.skills.hacking / (forms ? 1 : 2);
    if (candidate.requiredHackingSkill > skillCap) return target;
    let oldScore;
    let newScore;
    if (forms) {
        candidate.hackDifficulty = candidate.minDifficulty;
        current.hackDifficulty = current.minDifficulty;
        oldScore = current.moneyMax / ns.formulas.hacking.weakenTime(current, player) * ns.formulas.hacking.hackChance(current, player);
        newScore = candidate.moneyMax / ns.formulas.hacking.weakenTime(candidate, player) * ns.formulas.hacking.hackChance(candidate, player);
    } else {
        oldScore = current.moneyMax / current.minDifficulty;
        newScore = candidate.moneyMax / candidate.minDifficulty;
    }
    return newScore > oldScore ? server : target;
}
// Copies each listed script to `server` (rooted hosts only). Existing copies
// are left alone unless `overwrite` is set.
/** @param {NS} ns */
export function copyScripts(ns, server, scripts, overwrite = false) {
    if (!ns.hasRootAccess(server)) return;
    for (const script of scripts) {
        if (overwrite || !ns.fileExists(script, server)) {
            ns.scp(script, server);
        }
    }
}
// True when `server` sits at max money and (within a small tolerance) at
// minimum security — i.e. it is ready for batching.
export function isPrepped(ns, server) {
    const tolerance = 0.0001;
    const moneyOk = ns.getServerMoneyAvailable(server) === ns.getServerMaxMoney(server);
    const secOk = Math.abs(ns.getServerSecurityLevel(server) - ns.getServerMinSecurityLevel(server)) < tolerance;
    return moneyOk && secOk;
}
/*
This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it.
I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway.
The prep strategy uses a modified proto-batching technique, which will be covered in part 2.
*/
/**
 * Brings values.target to max money / min security with a modified
 * proto-batching strategy, then returns true.
 * @param {NS} ns
 * @param {Metrics} values - metrics for the target; also reused as a mock Job.
 * @param {RamNet} ramNet - network RAM map worker threads are allocated from.
 * @returns {Promise<boolean>} resolves true once the server is prepped.
 * @throws when a worker cannot be launched on its assigned block.
 */
export async function prep(ns, values, ramNet) {
    const maxMoney = values.maxMoney;
    const minSec = values.minSec;
    let money = values.money;
    let sec = values.sec;
    // One iteration = one prep wave; loop until the server is fully prepped.
    while (!isPrepped(ns, values.target)) {
        const wTime = ns.getWeakenTime(values.target);
        const gTime = wTime * 0.8;
        const dataPort = ns.getPortHandle(ns.pid);
        dataPort.clear();
        const pRam = ramNet.cloneBlocks();
        const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75);
        const totalThreads = ramNet.prepThreads;
        let wThreads1 = 0;
        let wThreads2 = 0;
        let gThreads = 0;
        let batchCount = 1;
        let script, mode;
        /*
        Modes:
        0: Security only
        1: Money only
        2: One shot
        */
        if (money < maxMoney) {
            gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money));
            wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05);
        }
        if (sec > minSec) {
            // One weaken thread removes 0.05 security, hence the * 20.
            wThreads1 = Math.ceil((sec - minSec) * 20);
            if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) {
                // Not enough RAM for everything: drop money work, weaken in waves.
                gThreads = 0;
                wThreads2 = 0;
                batchCount = Math.ceil(wThreads1 / totalThreads);
                if (batchCount > 1) wThreads1 = totalThreads;
                mode = 0;
            } else mode = 2;
        } else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) {
            // Security is fine but the grow doesn't fit: grow in capped waves
            // at a fixed ~12.5 grow : 1 weaken thread ratio.
            mode = 1;
            const oldG = gThreads;
            wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1);
            gThreads = Math.floor(wThreads2 * 12.5);
            batchCount = Math.ceil(oldG / gThreads);
        } else mode = 2;
        // Big buffer here, since all the previous calculations can take a while. One second should be more than enough.
        const wEnd1 = Date.now() + wTime + 1000;
        const gEnd = wEnd1 + values.spacer;
        const wEnd2 = gEnd + values.spacer;
        // "metrics" here is basically a mock Job object. Again, this is just an artifact of repurposed old code.
        // NOTE(review): values.log is not set by the Metrics constructor shown in
        // this file, so it serializes as undefined — confirm it is intentional.
        const metrics = {
            batch: "prep",
            target: values.target,
            type: "none",
            time: 0,
            end: 0,
            port: ns.pid,
            log: values.log,
            report: false
        };
        // Actually assigning threads. We actually allow grow threads to be spread out in mode 1.
        // This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher.
        // We're not trying to grow a specific amount, we're trying to grow as much as possible.
        for (const block of pRam) {
            while (block.ram >= 1.75) {
                const bMax = Math.floor(block.ram / 1.75)
                let threads = 0;
                if (wThreads1 > 0) {
                    script = "S2tWeaken.js";
                    metrics.type = "pWeaken1";
                    metrics.time = wTime;
                    metrics.end = wEnd1;
                    threads = Math.min(wThreads1, bMax);
                    // The very last weaken launched is the one that reports back.
                    if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true;
                    wThreads1 -= threads;
                } else if (wThreads2 > 0) {
                    script = "S2tWeaken.js";
                    metrics.type = "pWeaken2";
                    metrics.time = wTime;
                    metrics.end = wEnd2;
                    threads = Math.min(wThreads2, bMax);
                    if (wThreads2 - threads === 0) metrics.report = true;
                    wThreads2 -= threads;
                } else if (gThreads > 0 && mode === 1) {
                    // Mode 1: grow threads may be split across blocks.
                    script = "S2tGrow.js";
                    metrics.type = "pGrow";
                    metrics.time = gTime;
                    metrics.end = gEnd;
                    threads = Math.min(gThreads, bMax);
                    metrics.report = false;
                    gThreads -= threads;
                } else if (gThreads > 0 && bMax >= gThreads) {
                    // Otherwise grow only runs when it fits whole on one block.
                    script = "S2tGrow.js";
                    metrics.type = "pGrow";
                    metrics.time = gTime;
                    metrics.end = gEnd;
                    threads = gThreads;
                    metrics.report = false;
                    gThreads = 0;
                } else break;
                metrics.server = block.server;
                const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics));
                if (!pid) throw new Error("Unable to assign all jobs.");
                block.ram -= 1.75 * threads;
            }
        }
        // Fancy UI stuff to update you on progress.
        const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now();
        const timer = setInterval(() => {
            ns.clearLog();
            switch (mode) {
                case 0:
                    ns.print(`Weakening security on ${values.target}...`);
                    break;
                case 1:
                    ns.print(`Maximizing money on ${values.target}...`);
                    break;
                case 2:
                    ns.print(`Finalizing preparation on ${values.target}...`);
            }
            ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`);
            ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`);
            const time = tEnd - Date.now();
            ns.print(`Estimated time remaining: ${ns.tFormat(time)}`);
            ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`);
        }, 200);
        ns.atExit(() => clearInterval(timer));
        // Wait for the last weaken to finish.
        do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken"));
        clearInterval(timer);
        await ns.sleep(100);
        money = ns.getServerMoneyAvailable(values.target);
        sec = ns.getServerSecurityLevel(values.target);
    }
    return true;
}

View File

@@ -1 +0,0 @@
{"home":{"n00dles":{},"foodnstuff":{},"sigma-cosmetics":{"zer0":{"omega-net":{"the-hub":{},"netlink":{"rothman-uni":{"rho-construction":{"aerocorp":{"unitalife":{"icarus":{"infocomm":{"titan-labs":{"fulcrumtech":{"omnitek":{},"4sigma":{"powerhouse-fitness":{}}}}}},"solaris":{"nova-med":{"run4theh111z":{}}}}}}},"catalyst":{}}}},"max-hardware":{}},"joesguns":{},"hong-fang-tea":{},"harakiri-sushi":{"nectar-net":{"neo-net":{"avmnite-02h":{}},"phantasy":{}},"CSEC":{"silver-helix":{"computek":{"zb-institute":{"lexo-corp":{"global-pharm":{"omnia":{"defcomm":{"zb-def":{"microdyne":{"vitalife":{"kuai-gong":{},".":{"b-and-a":{},"blade":{"fulcrumassets":{}},"nwo":{"The-Cave":{}},"clarkinc":{"ecorp":{},"megacorp":{}}}}}}}},"deltaone":{"univ-energy":{"taiyang-digital":{"applied-energetics":{"stormtech":{},"helios":{}}}},"zeus-med":{}}}},"alpha-ent":{"galactic-cyber":{}}}},"johnson-ortho":{"summit-uni":{"millenium-fitness":{}},"I.I.I.I":{"aevum-police":{"snap-fitness":{}}}},"crush-fitness":{"syscore":{}}}}},"iron-gym":{},"darkweb":{},"pserv-0":{},"pserv-1":{},"pserv-2":{},"pserv-3":{},"pserv-4":{},"pserv-5":{},"pserv-6":{},"pserv-7":{},"pserv-8":{},"pserv-9":{},"pserv-10":{},"pserv-11":{},"pserv-12":{},"pserv-13":{},"pserv-14":{},"pserv-15":{},"pserv-16":{},"pserv-17":{},"pserv-18":{},"pserv-19":{},"pserv-20":{},"pserv-21":{},"pserv-22":{},"pserv-23":{},"pserv-24":{}}}

View File

@@ -1,5 +0,0 @@
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, runControllerOnPserv } from "/RamsesUtils.js";
/**
 * Thin launcher: all of the real work lives in RamsesUtils.js.
 * @param {NS} ns
 */
export async function main(ns) {
    await runControllerOnPserv(ns);
}

View File

@@ -1,14 +0,0 @@
/**
 * Dumps the current player object to the terminal for inspection.
 * @param {NS} ns
 */
export async function main(ns) {
    const player = ns.getPlayer();
    ns.tprint(player);
}

View File

@@ -1,147 +0,0 @@
/**
 * Ramses controller: estimates whether this host has enough free RAM to
 * sustain a repeating weaken/grow/hack cycle against a target server, then
 * loops forever launching the worker scripts on a fixed schedule.
 *
 * args[0] {string}  sTarget    - hostname to attack (required).
 * args[1] {number}  nFrequency - cycle period in milliseconds (default 20000).
 * args[2] {boolean} bIgnoreRAM - if true, run even when the RAM estimate says there is not enough.
 *
 * @param {NS} ns Netscript API handle.
 * @returns {Promise<boolean|undefined>} false on invalid arguments; otherwise loops forever.
 */
export async function main(ns) {
  //Arguments
  const sTarget = ns.args[0]; // target server
  let nFrequency = ns.args[1]; // frequency to run the Hack / Grow / Weaken
  const bIgnoreRAM = ns.args[2]; //if true the script will run even if estimated RAM is too low
  //Settings: worker scripts and the thread counts each one is launched with
  const sWeakenScript = "Ramses-weaken.js";
  const sGrowScript = "Ramses-grow.js";
  const sHackScript = "Ramses-hack.js";
  const nWeakenThreads = 5;
  const nGrowThreads = 10;
  const nHackThreads = 1;
  //silence the noisy per-call log entries
  ns.disableLog("getServerMaxRam");
  ns.disableLog("getServerUsedRam");
  ns.disableLog("getServerMinSecurityLevel");
  ns.disableLog("getServerMaxMoney");
  ns.disableLog("getServerSecurityLevel");
  ns.disableLog("getServerMoneyAvailable");
  //abort script if sTarget is undefined
  if (sTarget === undefined) {
    ns.tprint("1st arg sTarget is undefined");
    return false;
  }
  //how often do we run script in milliseconds
  if (nFrequency === undefined) {
    nFrequency = 20000; //run every 20 seconds unless defined as the 2nd argument when calling the script
  }
  //target server info
  const nMinSecurity = ns.getServerMinSecurityLevel(sTarget);
  const nMaxMoney = ns.getServerMaxMoney(sTarget);
  //abort script if sTarget cant have money
  if (nMaxMoney <= 0) {
    ns.tprint("sTarget (" + sTarget + ") has no nMaxMoney");
    return false;
  }
  //main variables
  const oRunner = ns.getServer(); //which server object is running this script
  const sRunner = oRunner.hostname; //hostname string of the server running the script
  const nMaxRAM = ns.getServerMaxRam(sRunner);
  const nUsedRAM = ns.getServerUsedRam(sRunner);
  let nFreeRam = nMaxRAM - nUsedRAM;
  const sScriptName = ns.getScriptName();
  const nScriptSize = ns.getScriptRam(sScriptName, sRunner);
  //per-launch RAM cost of each worker (script size * thread count)
  const nWeakenScriptRAM = ns.getScriptRam(sWeakenScript, sRunner) * nWeakenThreads;
  const nGrowScriptRAM = ns.getScriptRam(sGrowScript, sRunner) * nGrowThreads;
  const nHackScriptRAM = ns.getScriptRam(sHackScript, sRunner) * nHackThreads;
  const nWeakenTime = ns.getWeakenTime(sTarget);
  const nGrowTime = ns.getGrowTime(sTarget);
  const nHackTime = ns.getHackTime(sTarget);
  ns.tprint(sScriptName + " nScriptSize = " + nScriptSize + "GB");
  //worst case: how much RAM is tied up by overlapping launches of each worker
  //(runtime of one launch divided by how often we start a new one)
  const nMaxWeakenRAM = Math.ceil(nWeakenScriptRAM * ((nWeakenTime / 1000) / (nFrequency / 1000)));
  ns.tprint("nWeakenTime = " + nWeakenTime / 1000);
  ns.tprint("nFrequency = " + nFrequency / 1000);
  ns.tprint("nMaxWeakenRAM = " + nMaxWeakenRAM);
  const nMaxGrowRAM = Math.ceil(nGrowScriptRAM * ((nGrowTime / 1000) / (nFrequency / 1000)));
  ns.tprint("nGrowTime = " + nGrowTime / 1000);
  ns.tprint("nFrequency = " + nFrequency / 1000);
  ns.tprint("nMaxGrowRAM = " + nMaxGrowRAM);
  const nMaxHackRAM = Math.ceil(nHackScriptRAM * ((nHackTime / 1000) / (nFrequency / 1000)));
  ns.tprint("nHackTime = " + nHackTime / 1000);
  ns.tprint("nFrequency = " + nFrequency / 1000);
  ns.tprint("nMaxHackRAM = " + nMaxHackRAM);
  //7% safety margin on top of the summed estimate
  const nTotalRAM = (nScriptSize + nMaxWeakenRAM + nMaxGrowRAM + nMaxHackRAM) * 1.07;
  ns.tprint("Total estimated required RAM = " + nTotalRAM + "GB");
  ns.tprint("Available RAM: " + nFreeRam + " / " + nMaxRAM + "GB");
  if (nTotalRAM < nFreeRam || bIgnoreRAM === true) {
    while (true) {
      //server stats
      let nCurrentSecurity = ns.getServerSecurityLevel(sTarget);
      let nCurrentMoney = ns.getServerMoneyAvailable(sTarget);
      //timestamp
      let currentDate = new Date();
      let nOffset;
      ns.print("Cash: " + (Math.floor(nCurrentMoney * 1000) / 1000) + " / " + nMaxMoney);
      ns.print("Security: " + (Math.floor(nCurrentSecurity * 1000) / 1000) + " / " + nMinSecurity);
      //Calculate estimate time of completion
      nOffset = ns.getWeakenTime(sTarget);
      let nWeakTime = new Date(currentDate.getTime() + nOffset);
      let sWeakTime = nWeakTime.toLocaleTimeString('sw-SV'); //swedish time
      //Print estimated time of completion
      ns.print("Weakening " + sTarget + " Estimated complete at " + sWeakTime);
      //only grow once security is near minimum; only hack once money is near maximum
      if (nCurrentSecurity <= (nMinSecurity + 5)) {
        //Calculate estimate time of completion
        nOffset = ns.getGrowTime(sTarget);
        let nGrowTime = new Date(currentDate.getTime() + nOffset);
        let sGrowTime = nGrowTime.toLocaleTimeString('sw-SV'); //swedish time
        //Print estimated time of completion
        ns.print("Growing " + sTarget + " Estimated complete at " + sGrowTime);
        if (nCurrentMoney >= nMaxMoney * 0.8) {
          //Calculate estimate time of completion
          nOffset = ns.getHackTime(sTarget);
          let nHackTime = new Date(currentDate.getTime() + nOffset);
          let sHackTime = nHackTime.toLocaleTimeString('sw-SV'); //swedish time
          //Print estimated time of completion
          ns.print("Hacking " + sTarget + " Estimated complete at " + sHackTime);
          //use the named thread constants so the launch matches the RAM estimate above
          ns.run(sHackScript, nHackThreads, sTarget);
        }
        ns.run(sGrowScript, nGrowThreads, sTarget);
      }
      ns.run(sWeakenScript, nWeakenThreads, sTarget);
      nFreeRam = ns.getServerMaxRam(sRunner) - ns.getServerUsedRam(sRunner);
      await ns.sleep(nFrequency);
      ns.print("-------------------------------------------------------------------------");
    }
  }
  else {
    ns.tprint("Insufficient estimated required RAM... no scripts were started...");
  }
}

View File

@@ -1 +0,0 @@
{"serverName":"omega-net","maxRam":32,"maxMoney":69174578,"minSec":10,"minPorts":2,"minHackLvl":202,"rootAccess":true,"factorMoneyPerTime":0.0319075530954955,"openPorts":0,"serverFiles":["S4tGrow.js","S4tHack.js","S4tWeaken.js","the-new-god.lit"]}

View File

@@ -1,384 +0,0 @@
/*
Welcome to part 3. I'll only be commenting on things that have changed from the previous parts, so if there's something
confusing, be sure to go back and look at parts 1 and 2 for more detailed explanations.
This time we're going to make a shotgun batcher. In some ways this is really just a protobatcher that makes a
much larger batch. We're going to fill up ram with as many batches as we can manage, wait for them to finish, then
fire off another blast.
Note that this is mainly written with the fact that I intend to adapt this into a continuous batcher later in mind.
There are far more optimal ways to run a shotgun-style batcher, but rather than make the best shotgun I could,
I aimed to make this an ideal stepping stone on the quest for a continuous batcher.
*/
import { getServers, copyScripts, checkTarget, isPrepped, prep } from "/utils.js";
// Job types in the order they are scheduled within a single HWGW batch.
const TYPES = ["hack", "weaken1", "grow", "weaken2"];
// Worker scripts copied to every rooted server before deployment.
const WORKERS = ["tHack.js", "tWeaken.js", "tGrow.js"];
// Worker script executing each job type (both weaken slots share one script).
const SCRIPTS = { hack: "tHack.js", weaken1: "tWeaken.js", grow: "tGrow.js", weaken2: "tWeaken.js" };
// Per-thread RAM cost (GB) of each job type; used when assigning jobs to RAM blocks.
const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 };
// We won't be using the offsets anymore, but I've left them here in case we bring them back for a later part.
// const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 };
/**
 * Shotgun batcher controller: fills all available RAM across the network with
 * as many HWGW batches as fit, waits for them all to report back via the data
 * port, then fires the next volley.
 * @param {NS} ns
 */
export async function main(ns) {
  ns.disableLog("ALL");
  ns.tail();
  while (true) {
    // Setup is mostly the same.
    const dataPort = ns.getPortHandle(ns.pid);
    dataPort.clear();
    let target = "n00dles";
    // One pass over the network: pick the best target, push workers everywhere, collect rooted hosts.
    const servers = getServers(ns, (server) => {
      target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home"));
      copyScripts(ns, server, WORKERS, true);
      return ns.hasRootAccess(server);
    });
    /* manual override */
    // NOTE(review): this deliberately discards the computed best target and pins "max-hardware".
    target = "max-hardware";
    const ramNet = new RamNet(ns, servers);
    const metrics = new Metrics(ns, target);
    if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet);
    ns.clearLog();
    ns.print("Optimizing. This may take a few seconds...")
    /*
    New optimizer is async because it can take upwards of 5 seconds to run. We can afford the heavy
    computations because shotgun batchers are very front-loaded. In a "real" shotgun batcher, you'll want
    to modify the ramnet so that you can do this during the downtime between mega-batches.
    */
    await optimizeShotgun(ns, metrics, ramNet); // Picks metrics.greed and metrics.depth.
    metrics.calculate(ns);
    // I've renamed the schedule array from "batch" to "jobs" just for clarity purposes.
    // The batchCount declaration has also been moved down here because we use it for scheduling.
    const jobs = [];
    let batchCount = 0;
    // Another change. Instead of tracking the end times by type, I'm now using a unified end time.
    // This makes the scheduling a bit simpler as long as we're always going in chronological order.
    metrics.end = Date.now() + metrics.wTime - metrics.spacer;
    // Instead of one batch, we repeat the scheduling based on the depth calculated by the optimizer.
    while (batchCount++ < metrics.depth) {
      for (const type of TYPES) {
        // As you can see, calculating the end time for each new job is much simpler this way.
        // The rest of the scheduling is mostly unchanged.
        metrics.end += metrics.spacer;
        // Batchcount is part of the constructor now.
        const job = new Job(type, metrics, batchCount);
        // If a job fails to fit into any RAM block, abort the whole volley with debug output.
        if (!ramNet.assign(job)) {
          ns.print(`ERROR: Unable to assign ${type}. Dumping debug info:`);
          ns.print(job);
          ns.print(metrics);
          ramNet.printBlocks(ns);
          return;
        }
        jobs.push(job);
      }
    }
    /*
    Deployment is completely unchanged. However, with the much larger batch sizes, you may find that
    this can potentially freeze the game for minutes at a time. If it's too disruptive or triggers the
    infinite loop failsafe, you can uncomment the sleep line.
    There's really no need to do this synchronously for our batcher, but in a "real" shotgun batcher, you wouldn't
    use any spacers at all, and try to keep deployment time and execution time down to as little as possible in order
    to minimize downtime.
    */
    for (const job of jobs) {
      job.end += metrics.delay;
      const jobPid = ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job));
      if (!jobPid) throw new Error(`Unable to deploy ${job.type}`);
      // Each worker reports its startup lateness on its own pid port; accumulate it.
      const tPort = ns.getPortHandle(jobPid);
      await tPort.nextWrite();
      metrics.delay += tPort.read();
    }
    /*
    This is a silly hack. Due to the way arrays work in JS, pop() is much faster than shift() and we're
    going to be accessing these jobs in FIFO order in a moment (ie. a queue). Since we've got lots of downtime
    and the jobs array can get really huge, I just reverse them now to save time later.
    */
    jobs.reverse();
    // Live status display, refreshed once per second while the volley runs.
    const timer = setInterval(() => {
      ns.clearLog();
      ns.print(`Hacking ~\$${ns.formatNumber(metrics.maxMoney * metrics.greed * batchCount * metrics.chance)} from ${metrics.target}`);
      ns.print(`Greed: ${Math.floor(metrics.greed * 1000) / 10}%`);
      ns.print(`Ram available: ${ns.formatRam(ramNet.totalRam)}/${ns.formatRam(ramNet.maxRam)}`);
      ns.print(`Total delay: ${metrics.delay}ms`);
      ns.print(`Active jobs remaining: ${jobs.length}`);
      ns.print(`ETA ${ns.tFormat(metrics.end - Date.now())}`);
    }, 1000);
    ns.atExit(() => {
      clearInterval(timer);
    });
    /*
    As each job finishes, we update the ramnet to reflect it. Once the queue is empty, we start over.
    Updating the ramnet like this isn't really necessary since we're just going to rebuild it entirely in
    the next iteration, but it demonstrates the bookkeeping a continuous batcher needs.
    */
    do {
      await dataPort.nextWrite();
      dataPort.clear();
      // Jobs might finish out of order due to lag, but nothing uses the payload yet, so it's harmless.
      ramNet.finish(jobs.pop());
    } while (jobs.length > 0);
    clearInterval(timer);
  }
}
// The Job class, lean as it is, remains mostly unchanged. I got rid of the server argument since I wasn't using it
// and added a batch number instead.
// Lightweight job descriptor; serialized to JSON and handed to a worker script.
class Job {
  /**
   * @param {string} type - one of "hack", "weaken1", "grow", "weaken2".
   * @param {Metrics} metrics - current batch metrics (timings, threads, target).
   * @param {number} batch - index of the batch this job belongs to.
   */
  constructor(type, metrics, batch) {
    // Identity and scheduling.
    this.type = type;
    this.batch = batch;
    this.target = metrics.target;
    // Timing: the unified end time plus this type's execution duration.
    this.end = metrics.end;
    this.time = metrics.times[type];
    // Sizing: thread count and the RAM it will cost on its host.
    this.threads = metrics.threads[type];
    this.cost = this.threads * COSTS[type];
    this.server = "none"; // Filled in later by RamNet.assign().
    // Reporting: workers write type+server to this port when they finish.
    this.report = true;
    this.port = metrics.port;
  }
}
// Almost entirely the same, aside from the changes to end time.
/** @param {NS} ns */
// Snapshot of a target server's state plus the derived batch parameters
// (timings, thread counts, greed, depth) used by the scheduler.
class Metrics {
  /**
   * @param {NS} ns
   * @param {string} server - hostname of the hack target.
   */
  constructor(ns, server) {
    this.target = server;
    this.maxMoney = ns.getServerMaxMoney(server);
    this.money = Math.max(ns.getServerMoneyAvailable(server), 1); // floor at 1 to avoid divide-by-zero later
    this.minSec = ns.getServerMinSecurityLevel(server);
    this.sec = ns.getServerSecurityLevel(server);
    this.prepped = isPrepped(ns, server);
    this.chance = 0; // hack success chance, filled in by calculate()
    this.wTime = 0; // weaken duration, filled in by calculate()
    this.delay = 0; // accumulated startup lateness across all deployed jobs
    this.spacer = 5; // ms gap between consecutive job end times
    this.greed = 0.1; // fraction of max money to hack per batch; tuned by the optimizer
    this.depth = 0; // The number of concurrent batches to run. Set by the optimizer.
    this.times = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
    this.end = 0; // Unified end time of the most recently scheduled job.
    this.threads = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
    this.port = ns.pid; // workers report back on the controller's pid port
  }
  /**
   * Refreshes server state and recomputes per-type durations and thread counts
   * for the given greed level.
   * @param {NS} ns
   * @param {number} [greed] - fraction of max money to hack; defaults to this.greed.
   */
  calculate(ns, greed = this.greed) {
    const server = this.target;
    const maxMoney = this.maxMoney;
    this.money = ns.getServerMoneyAvailable(server);
    this.sec = ns.getServerSecurityLevel(server);
    this.wTime = ns.getWeakenTime(server);
    this.times.weaken1 = this.wTime;
    this.times.weaken2 = this.wTime;
    // Hack and grow durations are fixed fractions of weaken time (1/4 and 4/5).
    this.times.hack = this.wTime / 4;
    this.times.grow = this.wTime * 0.8;
    const hPercent = ns.hackAnalyze(server);
    const amount = maxMoney * greed;
    const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(server, amount)), 1);
    const tGreed = hPercent * hThreads; // actual greed achieved by whole hack threads
    // Grow threads are overestimated by 1% to help prevent level-ups from causing desyncs.
    const gThreads = Math.ceil(ns.growthAnalyze(server, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
    // Weaken threads sized by security deltas — presumably 0.002/hack and 0.004/grow
    // vs 0.05 removed per weaken thread (game constants) — TODO confirm against game docs.
    this.threads.weaken1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
    this.threads.weaken2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
    this.threads.hack = hThreads;
    this.threads.grow = gThreads;
    this.chance = ns.hackAnalyzeChance(server);
  }
}
// Once again, not a whole lot of changes. I've added a new function in support of the optimizer. Details below.
/** @param {NS} ns */
// Bookkeeping for free RAM across the whole network: a sorted list of
// per-server free-RAM blocks that jobs can be assigned to and released from.
class RamNet {
  #blocks = []; // { server, ram } entries, sorted smallest-first with home last
  #minBlockSize = Infinity;
  #maxBlockSize = 0;
  #totalRam = 0; // currently free RAM across all blocks
  #maxRam = 0; // total installed RAM across all blocks
  #prepThreads = 0; // how many 1.75GB worker threads fit in the free RAM
  #index = new Map(); // server name -> position in #blocks, for O(1) lookup
  /**
   * @param {NS} ns
   * @param {string[]} servers - candidate hostnames; only rooted servers with
   *   at least 1.6GB free are included.
   */
  constructor(ns, servers) {
    for (const server of servers) {
      if (ns.hasRootAccess(server)) {
        const maxRam = ns.getServerMaxRam(server);
        const ram = maxRam - ns.getServerUsedRam(server);
        if (ram >= 1.60) {
          const block = { server: server, ram: ram };
          this.#blocks.push(block);
          if (ram < this.#minBlockSize) this.#minBlockSize = ram;
          if (ram > this.#maxBlockSize) this.#maxBlockSize = ram;
          this.#totalRam += ram;
          this.#maxRam += maxRam;
          this.#prepThreads += Math.floor(ram / 1.75);
        }
      }
    }
    this.#sort();
    this.#blocks.forEach((block, index) => this.#index.set(block.server, index));
  }
  // Smallest blocks first so small jobs fill small servers; home is kept last
  // so its RAM is only used when nothing else fits.
  #sort() {
    this.#blocks.sort((x, y) => {
      if (x.server === "home") return 1;
      if (y.server === "home") return -1;
      return x.ram - y.ram;
    });
  }
  get totalRam() {
    return this.#totalRam;
  }
  get maxRam() {
    return this.#maxRam;
  }
  get maxBlockSize() {
    return this.#maxBlockSize;
  }
  get prepThreads() {
    return this.#prepThreads;
  }
  // Look up a server's block via the index map; throws if the server is unknown.
  getBlock(server) {
    if (this.#index.has(server)) {
      return this.#blocks[this.#index.get(server)];
    } else {
      throw new Error(`Server ${server} not found in RamNet.`);
    }
  }
  // First-fit assignment: place the job on the smallest block that can hold it.
  // Returns false (leaving the job unassigned) when nothing fits.
  assign(job) {
    const block = this.#blocks.find(block => block.ram >= job.cost);
    if (block) {
      job.server = block.server;
      block.ram -= job.cost;
      this.#totalRam -= job.cost;
      return true;
    } else return false;
  }
  // Release a finished job's RAM back to its block.
  finish(job) {
    const block = this.getBlock(job.server);
    block.ram += job.cost;
    this.#totalRam += job.cost;
  }
  // Shallow copies of the blocks, for simulations that must not mutate state.
  cloneBlocks() {
    return this.#blocks.map(block => ({ ...block }));
  }
  printBlocks(ns) {
    for (const block of this.#blocks) ns.print(block);
  }
  // This function takes an array of job costs and simulates assigning them to see how many batches it can fit.
  testThreads(threadCosts) {
    // Clone the blocks, since we don't want to actually change the ramnet.
    const pRam = this.cloneBlocks();
    let batches = 0;
    let found = true;
    while (found) {
      // Pretty much just a copy of assign(). Repeat until a batch fails to assign all its jobs.
      for (const cost of threadCosts) {
        found = false;
        const block = pRam.find(block => block.ram >= cost);
        if (block) {
          block.ram -= cost;
          found = true;
        } else break;
      }
      if (found) batches++; // If all of the jobs were assigned successfully, +1 batch and loop.
    }
    return batches; // Otherwise, we've found our number.
  }
}
// This one's got some pretty big changes, even if it doesn't look like it. For one, it's now async, and you'll see why.
/**
* @param {NS} ns
* @param {Metrics} metrics
* @param {RamNet} ramNet
*/
// Scans greed levels from high to low, simulating how many batches fit in RAM
// at each level, and stores the income-maximizing greed/depth in `metrics`.
// Async (with a sleep per iteration) so the heavy simulation loop can't freeze the game.
async function optimizeShotgun(ns, metrics, ramNet) {
  // Setup is mostly the same.
  const maxThreads = ramNet.maxBlockSize / 1.75; // most threads a single block can host
  const maxMoney = metrics.maxMoney;
  const hPercent = ns.hackAnalyze(metrics.target);
  const wTime = ns.getWeakenTime(metrics.target); // We'll need this for the income-per-time calculation.
  const minGreed = 0.001;
  const stepValue = 0.01;
  let greed = 0.99;
  let best = 0; // Best income-per-ms found so far.
  // Weed out greed levels whose hack or grow job can't fit in any single block.
  while (greed > minGreed) {
    const amount = maxMoney * greed;
    const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(metrics.target, amount)), 1);
    const tGreed = hPercent * hThreads;
    // 1% overestimation here too. Always make sure your calculations match Metrics.calculate().
    const gThreads = Math.ceil(ns.growthAnalyze(metrics.target, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
    if (Math.max(hThreads, gThreads) <= maxThreads) {
      const wThreads1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
      const wThreads2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
      // Per-job RAM costs — must mirror the COSTS table (1.7 hack, 1.75 others).
      const threadCosts = [hThreads * 1.7, wThreads1 * 1.75, gThreads * 1.75, wThreads2 * 1.75];
      /*
      Calculate the number of batches we can fit into ram at the current greed level,
      then how much money that nets per unit time. If that income rate beats the best
      found so far, record this greed/depth pair.
      */
      const batchCount = ramNet.testThreads(threadCosts);
      const income = tGreed * maxMoney * batchCount / (metrics.spacer * 4 * batchCount + wTime);
      if (income > best) {
        best = income;
        metrics.greed = tGreed;
        metrics.depth = batchCount;
      }
    }
    // Yield to the game between iterations; this loop can take seconds in total.
    await ns.sleep(0);
    greed -= stepValue;
  }
  // Only throw if we failed to find any valid configuration at all.
  if (best === 0) throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong.");
}

View File

@@ -1,4 +0,0 @@
/**
 * Smoke-test entry point: just confirms the script can run.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  ns.tprint('success');
}

View File

@@ -1,164 +0,0 @@
/**
 * One-shot cleanup: deletes a fixed snapshot of stale coding-contract files
 * from the current server.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  // Hard-coded list of the .cct files to remove.
  const files = ["contract-115802.cct",
    "contract-121862.cct",
    "contract-124253.cct",
    "contract-130050.cct",
    "contract-132458.cct",
    "contract-133951.cct",
    "contract-137578.cct",
    "contract-140971.cct",
    "contract-141455.cct",
    "contract-143455.cct",
    "contract-160840.cct",
    "contract-16178.cct",
    "contract-166840.cct",
    "contract-171215.cct",
    "contract-173050.cct",
    "contract-17770.cct",
    "contract-18028.cct",
    "contract-183510.cct",
    "contract-195657.cct",
    "contract-202801.cct",
    "contract-204367.cct",
    "contract-217301.cct",
    "contract-221818.cct",
    "contract-230038.cct",
    "contract-236291.cct",
    "contract-241097.cct",
    "contract-242406.cct",
    "contract-253624.cct",
    "contract-25923.cct",
    "contract-265811.cct",
    "contract-267921.cct",
    "contract-275503.cct",
    "contract-278600.cct",
    "contract-279485.cct",
    "contract-280015.cct",
    "contract-280601.cct",
    "contract-286276.cct",
    "contract-286508.cct",
    "contract-298018.cct",
    "contract-30149.cct",
    "contract-302603.cct",
    "contract-305569.cct",
    "contract-322138.cct",
    "contract-323283.cct",
    "contract-328409.cct",
    "contract-328979.cct",
    "contract-334003.cct",
    "contract-36206.cct",
    "contract-375990.cct",
    "contract-376805.cct",
    "contract-410024.cct",
    "contract-413055.cct",
    "contract-423941.cct",
    "contract-427686.cct",
    "contract-441619.cct",
    "contract-446103.cct",
    "contract-448094.cct",
    "contract-467871.cct",
    "contract-480431.cct",
    "contract-505241.cct",
    "contract-516679.cct",
    "contract-519369.cct",
    "contract-529643.cct",
    "contract-535021.cct",
    "contract-535336.cct",
    "contract-547419.cct",
    "contract-560001.cct",
    "contract-564079.cct",
    "contract-570111.cct",
    "contract-570844.cct",
    "contract-573534.cct",
    "contract-576739.cct",
    "contract-580202.cct",
    "contract-584555.cct",
    "contract-586489.cct",
    "contract-592906.cct",
    "contract-599940.cct",
    "contract-600802.cct",
    "contract-603840.cct",
    "contract-605640.cct",
    "contract-6060.cct",
    "contract-606205.cct",
    "contract-610194.cct",
    "contract-619856.cct",
    "contract-631275.cct",
    "contract-6317.cct",
    "contract-653136.cct",
    "contract-655415.cct",
    "contract-658731.cct",
    "contract-662427.cct",
    "contract-663124.cct",
    "contract-663518.cct",
    "contract-669853.cct",
    "contract-671683.cct",
    "contract-676164.cct",
    "contract-677643.cct",
    "contract-681060.cct",
    "contract-683911.cct",
    "contract-685393.cct",
    "contract-695727.cct",
    "contract-696156.cct",
    "contract-703758.cct",
    "contract-720460.cct",
    "contract-722083.cct",
    "contract-727788.cct",
    "contract-735210.cct",
    "contract-736394.cct",
    "contract-736483.cct",
    "contract-748113.cct",
    "contract-751169.cct",
    "contract-752502.cct",
    "contract-765155.cct",
    "contract-772173.cct",
    "contract-773439.cct",
    "contract-77492.cct",
    "contract-778492.cct",
    "contract-784712.cct",
    "contract-785014.cct",
    "contract-786215.cct",
    "contract-789483.cct",
    "contract-7918.cct",
    "contract-796855.cct",
    "contract-800839.cct",
    "contract-801748.cct",
    "contract-81208.cct",
    "contract-817514.cct",
    "contract-82882.cct",
    "contract-843473.cct",
    "contract-843884.cct",
    "contract-847170.cct",
    "contract-847956.cct",
    "contract-848049.cct",
    "contract-856399.cct",
    "contract-862326.cct",
    "contract-866043.cct",
    "contract-866539.cct",
    "contract-870914.cct",
    "contract-887241.cct",
    "contract-893688.cct",
    "contract-89945.cct",
    "contract-900580.cct",
    "contract-915646.cct",
    "contract-918325.cct",
    "contract-9193.cct",
    "contract-921551.cct",
    "contract-942582.cct",
    "contract-945836.cct",
    "contract-947944.cct",
    "contract-954121.cct",
    "contract-957901.cct",
    "contract-960362.cct",
    "contract-963099.cct",
    "contract-965221.cct",
    "contract-979556.cct",
    "contract-985969.cct",
    "contract-992733.cct",
    "contract-996245.cct",
    "contract-997464.cct"];
  for (const file of files) {
    ns.rm(file);
  }
}

File diff suppressed because one or more lines are too long

View File

@@ -1,24 +0,0 @@
/*
Workers are mostly the same, aside from uncommented portions allowing the hack and grow workers to report.
I've also generally commented out the terminal logging, as it gets rather laggy when there's a lot of scripts
writing to terminal.
*/
/**
 * Grow worker: pads its grow() call (via additionalMsec) so it lands exactly
 * at job.end, reporting any startup lateness back on its own pid port and
 * its completion on the controller's port.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  const job = JSON.parse(ns.args[0]);
  // How long we can afford to wait so that grow() finishes at job.end.
  let padding = job.end - job.time - Date.now();
  if (padding < 0) {
    // Already late: report by how much, then run immediately.
    ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-padding}ms late. (${job.end})\n`);
    ns.writePort(ns.pid, -padding);
    padding = 0;
  } else {
    ns.writePort(ns.pid, 0);
  }
  await ns.grow(job.target, { additionalMsec: padding });
  const finishedAt = Date.now(); // kept for the disabled debug print below
  ns.atExit(() => {
    // Tell the controller this job is done so it can free the RAM block.
    if (job.report) ns.writePort(job.port, job.type + job.server);
    // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${finishedAt.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
  });
}

View File

@@ -1,24 +0,0 @@
/*
Workers are mostly the same, aside from uncommented portions allowing the hack and grow workers to report.
I've also generally commented out the terminal logging, as it gets rather laggy when there's a lot of scripts
writing to terminal.
*/
/**
 * Hack worker: pads its hack() call (via additionalMsec) so it lands exactly
 * at job.end, reporting any startup lateness back on its own pid port and
 * its completion on the controller's port.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  const job = JSON.parse(ns.args[0]);
  // How long we can afford to wait so that hack() finishes at job.end.
  let padding = job.end - job.time - Date.now();
  if (padding < 0) {
    // Already late: report by how much, then run immediately.
    ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-padding}ms late. (${job.end})\n`);
    ns.writePort(ns.pid, -padding);
    padding = 0;
  } else {
    ns.writePort(ns.pid, 0);
  }
  await ns.hack(job.target, { additionalMsec: padding });
  const finishedAt = Date.now(); // kept for the disabled debug print below
  ns.atExit(() => {
    // Tell the controller this job is done so it can free the RAM block.
    if (job.report) ns.writePort(job.port, job.type + job.server);
    // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${finishedAt.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
  });
}

View File

@@ -1,28 +0,0 @@
/*
Workers are mostly the same, aside from uncommented portions allowing the hack and grow workers to report.
I've also generally commented out the terminal logging, as it gets rather laggy when there's a lot of scripts
writing to terminal.
*/
/**
 * Weaken worker: pads its weaken() call (via additionalMsec) so it lands
 * exactly at job.end. Lateness is written back on this script's own pid port
 * so the controller can shift the remaining jobs to match; completion is
 * reported on the controller's port.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  const job = JSON.parse(ns.args[0]);
  // How long we can afford to wait so that weaken() finishes at job.end.
  let padding = job.end - job.time - Date.now();
  if (padding < 0) {
    // Already late: report by how much, then run immediately.
    ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-padding}ms late. (${job.end})\n`);
    ns.writePort(ns.pid, -padding);
    padding = 0;
  } else {
    ns.writePort(ns.pid, 0);
  }
  await ns.weaken(job.target, { additionalMsec: padding });
  const finishedAt = Date.now(); // kept for the disabled debug print below
  // Write back to let the controller know that we're done.
  ns.atExit(() => {
    if (job.report) ns.writePort(job.port, job.type + job.server);
    // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${finishedAt.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
  });
}

View File

@@ -1,4 +0,0 @@
/**
 * Template entry point: prints a greeting to the terminal.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  ns.tprint("Hello World!");
}
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIkM6L2dhbWVzL0JpdEJ1cm5lckdpdC92aXRlYnVybmVyLXRlbXBsYXRlL3NyYy90ZW1wbGF0ZS50cyJdLCJzb3VyY2VzQ29udGVudCI6WyJpbXBvcnQgeyBOUyB9IGZyb20gJ0Bucyc7XHJcblxyXG5leHBvcnQgYXN5bmMgZnVuY3Rpb24gbWFpbihuczogTlMpIHtcclxuICBucy50cHJpbnQoJ0hlbGxvIFdvcmxkIScpO1xyXG59XHJcbiJdLCJtYXBwaW5ncyI6IkFBRUEsc0JBQXNCLEtBQUssSUFBUTtBQUNqQyxLQUFHLE9BQU8sY0FBYztBQUMxQjsiLCJuYW1lcyI6W119

View File

@@ -1,218 +0,0 @@
/*
This file remains unchanged from the previous part, aside from updating the file paths.
I didn't even bother removing the old comments.
*/
/**
 * This module is a function library; running it directly only prints a notice.
 * @param {NS} ns Netscript API handle.
 */
export async function main(ns) {
  ns.tprint("This is just a function library, it doesn't do anything.");
}
// The recursive server navigation algorithm. The lambda predicate determines which servers to add to the final list.
// You can also plug other functions into the lambda to perform other tasks that check all servers at the same time.
/**
 * Recursively walks the network graph from `hostname` and returns every host
 * for which `lambdaCondition` returns true. The predicate can also be used to
 * perform side effects on every server during the single traversal.
 * @param {NS} ns
 * @param {(host: string) => boolean} [lambdaCondition] - filter predicate; defaults to accepting all.
 * @param {string} [hostname] - traversal root, defaults to "home".
 * @param {string[]} [servers] - accumulator for matching hosts.
 * @param {string[]} [visited] - accumulator of already-seen hosts.
 * @returns {string[]} the accumulated list of matching hostnames.
 */
export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) {
  if (visited.includes(hostname)) return;
  visited.push(hostname);
  if (lambdaCondition(hostname)) servers.push(hostname);
  const neighbors = ns.scan(hostname);
  // The first scan result of any non-home host is the parent we arrived from.
  if (hostname !== "home") neighbors.shift();
  neighbors.forEach((next) => getServers(ns, lambdaCondition, next, servers, visited));
  return servers;
}
// Here are a couple of my own getServers modules.
// This one finds the best target for hacking. It tries to balance expected return with time taken.
/**
 * Compares `server` against the current best hack `target` and returns
 * whichever scores higher. With Formulas (`forms`), the score is
 * money / weaken-time * hack-chance at minimum security; without it, a rougher
 * money / min-security / weaken-time estimate is used and only servers at or
 * below half the player's hacking level are considered.
 * @param {NS} ns
 * @param {string} server - candidate hostname.
 * @param {string} [target] - current best target, defaults to "n00dles".
 * @param {boolean} [forms] - whether Formulas.exe calculations are available.
 * @returns {string} the better of `server` and `target`.
 */
export function checkTarget(ns, server, target = "n00dles", forms = false) {
  if (!ns.hasRootAccess(server)) return target;
  const player = ns.getPlayer();
  const candidate = ns.getServer(server);
  const incumbent = ns.getServer(target);
  // Without Formulas, require a generous skill margin (half the player's level).
  if (candidate.requiredHackingSkill > player.skills.hacking / (forms ? 1 : 2)) return target;
  let incumbentScore;
  let candidateScore;
  if (forms) {
    // Score both at minimum security for an apples-to-apples comparison.
    candidate.hackDifficulty = candidate.minDifficulty;
    incumbent.hackDifficulty = incumbent.minDifficulty;
    incumbentScore = incumbent.moneyMax / ns.formulas.hacking.weakenTime(incumbent, player) * ns.formulas.hacking.hackChance(incumbent, player);
    candidateScore = candidate.moneyMax / ns.formulas.hacking.weakenTime(candidate, player) * ns.formulas.hacking.hackChance(candidate, player);
  } else {
    incumbentScore = incumbent.moneyMax / incumbent.minDifficulty / ns.getWeakenTime(incumbent.hostname);
    candidateScore = candidate.moneyMax / candidate.minDifficulty / ns.getWeakenTime(candidate.hostname);
  }
  return candidateScore > incumbentScore ? server : target;
}
// A simple function for copying a list of scripts to a server.
/**
 * Copies each script in `scripts` to `server`, skipping files already present
 * unless `overwrite` is set. Only copies when we have root on the destination.
 * @param {NS} ns
 * @param {string} server - destination hostname.
 * @param {string[]} scripts - script filenames to copy.
 * @param {boolean} [overwrite] - replace existing files when true.
 */
export function copyScripts(ns, server, scripts, overwrite = false) {
  for (const script of scripts) {
    const missing = !ns.fileExists(script, server);
    if ((missing || overwrite) && ns.hasRootAccess(server)) {
      ns.scp(script, server);
    }
  }
}
// A generic function to check that a given server is prepped. Mostly just a convenience.
/**
 * Reports whether a server is fully "prepped": money at maximum and security
 * at minimum (within a small float tolerance left behind by weaken).
 * @param {NS} ns
 * @param {string} server - hostname to check.
 * @returns {boolean} true when the server is ready for batching.
 */
export function isPrepped(ns, server) {
  const tolerance = 0.0001;
  const maxMoney = ns.getServerMaxMoney(server);
  const money = ns.getServerMoneyAvailable(server);
  const minSec = ns.getServerMinSecurityLevel(server);
  const sec = ns.getServerSecurityLevel(server);
  // Return the boolean expression directly; the old `? true : false` was redundant.
  return money === maxMoney && Math.abs(sec - minSec) < tolerance;
}
/*
This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it.
I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway.
The prep strategy uses a modified proto-batching technique, which will be covered in part 2.
*/
/**
 * Prepares a target server for batching: drives security to minimum and money
 * to maximum using a modified proto-batching strategy, blocking until done.
 * @param {NS} ns
 * @param {Metrics} values - metrics object for the target (read and used as a job template).
 * @param {RamNet} ramNet - network RAM bookkeeping used to size and place jobs.
 * @returns {Promise<boolean>} true once the target is fully prepped.
 */
export async function prep(ns, values, ramNet) {
  const maxMoney = values.maxMoney;
  const minSec = values.minSec;
  let money = values.money;
  let sec = values.sec;
  // Repeat whole prep cycles until the server passes the isPrepped check.
  while (!isPrepped(ns, values.target)) {
    const wTime = ns.getWeakenTime(values.target);
    const gTime = wTime * 0.8;
    const dataPort = ns.getPortHandle(ns.pid);
    dataPort.clear();
    const pRam = ramNet.cloneBlocks();
    const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75);
    const totalThreads = ramNet.prepThreads;
    let wThreads1 = 0;
    let wThreads2 = 0;
    let gThreads = 0;
    let batchCount = 1;
    let script, mode;
    /*
    Modes:
    0: Security only
    1: Money only
    2: One shot
    */
    if (money < maxMoney) {
      gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money));
      wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05);
    }
    if (sec > minSec) {
      // 20 weaken threads per point of excess security — presumably 0.05 security
      // removed per thread (game constant) — TODO confirm.
      wThreads1 = Math.ceil((sec - minSec) * 20);
      if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) {
        // Not enough room for the full one-shot: weaken only, possibly over several cycles.
        gThreads = 0;
        wThreads2 = 0;
        batchCount = Math.ceil(wThreads1 / totalThreads);
        if (batchCount > 1) wThreads1 = totalThreads;
        mode = 0;
      } else mode = 2;
    } else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) {
      // Security is done but grow doesn't fit: grow/weaken in a fixed 12.5:1 ratio.
      mode = 1;
      const oldG = gThreads;
      wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1);
      gThreads = Math.floor(wThreads2 * 12.5);
      batchCount = Math.ceil(oldG / gThreads);
    } else mode = 2;
    // Big buffer here, since all the previous calculations can take a while. One second should be more than enough.
    const wEnd1 = Date.now() + wTime + 1000;
    const gEnd = wEnd1 + values.spacer;
    const wEnd2 = gEnd + values.spacer;
    // "metrics" here is basically a mock Job object handed to the workers as JSON.
    // NOTE(review): values.log is not set by the Metrics constructor — likely undefined here; verify.
    const metrics = {
      batch: "prep",
      target: values.target,
      type: "none",
      time: 0,
      end: 0,
      port: ns.pid,
      log: values.log,
      report: false
    };
    // Actually assigning threads. We actually allow grow threads to be spread out in mode 1.
    // This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher.
    // We're not trying to grow a specific amount, we're trying to grow as much as possible.
    for (const block of pRam) {
      while (block.ram >= 1.75) {
        const bMax = Math.floor(block.ram / 1.75)
        let threads = 0;
        if (wThreads1 > 0) {
          script = "tWeaken.js";
          metrics.type = "pWeaken1";
          metrics.time = wTime;
          metrics.end = wEnd1;
          threads = Math.min(wThreads1, bMax);
          // The very last weaken reports back so we know when the cycle is done.
          if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true;
          wThreads1 -= threads;
        } else if (wThreads2 > 0) {
          script = "tWeaken.js";
          metrics.type = "pWeaken2";
          metrics.time = wTime;
          metrics.end = wEnd2;
          threads = Math.min(wThreads2, bMax);
          if (wThreads2 - threads === 0) metrics.report = true;
          wThreads2 -= threads;
        } else if (gThreads > 0 && mode === 1) {
          script = "tGrow.js";
          metrics.type = "pGrow";
          metrics.time = gTime;
          metrics.end = gEnd;
          threads = Math.min(gThreads, bMax);
          metrics.report = false;
        gThreads -= threads;
        } else if (gThreads > 0 && bMax >= gThreads) {
          // Outside mode 1, grow must fit entirely in one block to keep its effect intact.
          script = "tGrow.js";
          metrics.type = "pGrow";
          metrics.time = gTime;
          metrics.end = gEnd;
          threads = gThreads;
          metrics.report = false;
          gThreads = 0;
        } else break;
        metrics.server = block.server;
        const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics));
        if (!pid) throw new Error("Unable to assign all jobs.");
        block.ram -= 1.75 * threads;
      }
    }
    // Fancy UI stuff to update you on progress.
    const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now();
    const timer = setInterval(() => {
      ns.clearLog();
      switch (mode) {
        case 0:
          ns.print(`Weakening security on ${values.target}...`);
          break;
        case 1:
          ns.print(`Maximizing money on ${values.target}...`);
          break;
        case 2:
          ns.print(`Finalizing preparation on ${values.target}...`);
      }
      ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`);
      ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`);
      const time = tEnd - Date.now();
      ns.print(`Estimated time remaining: ${ns.tFormat(time)}`);
      ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`);
    }, 200);
    ns.atExit(() => clearInterval(timer));
    // Wait for the last weaken to finish.
    do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken"));
    clearInterval(timer);
    await ns.sleep(100);
    // Re-read state for the next cycle's sizing and for the loop condition.
    money = ns.getServerMoneyAvailable(values.target);
    sec = ns.getServerSecurityLevel(values.target);
  }
  return true;
}