From 3cad1a10ca4c80b38c7b67b6d9cc5dfdb0e16709 Mon Sep 17 00:00:00 2001 From: Philipp Date: Sat, 5 Oct 2024 19:46:43 +0200 Subject: [PATCH] Reupload Ramses Scripts --- local/path/home/Ramses/Backdoor.js | 97 ++++ local/path/home/Ramses/ContractSolver.js | 161 ++++++ local/path/home/Ramses/CorpControl.js | 14 + local/path/home/Ramses/CrackAndRootAll.js | 8 + local/path/home/Ramses/RamsesUtils.js | 278 +++++++++++ local/path/home/Ramses/S4controller.js | 491 +++++++++++++++++++ local/path/home/Ramses/S4logHelper.js | 42 ++ local/path/home/Ramses/S4tGrow.js | 30 ++ local/path/home/Ramses/S4tHack.js | 30 ++ local/path/home/Ramses/S4tWeaken.js | 41 ++ local/path/home/Ramses/S4utils.js | 420 ++++++++++++++++ local/path/home/Ramses/Serverlist.js | 42 ++ local/path/home/Ramses/analyzeContract.js | 21 + local/path/home/Ramses/corp/Autosell.js | 7 + local/path/home/Ramses/corp/HireWorkers.js | 16 + local/path/home/Ramses/corp/SetupExport.js | 13 + local/path/home/Ramses/corp/Smart.js | 6 + local/path/home/Ramses/corp/UpgradeOffice.js | 12 + local/path/home/Ramses/killAllScript.js | 8 + local/path/home/Ramses/purchaseServers.js | 5 + 20 files changed, 1742 insertions(+) create mode 100644 local/path/home/Ramses/Backdoor.js create mode 100644 local/path/home/Ramses/ContractSolver.js create mode 100644 local/path/home/Ramses/CorpControl.js create mode 100644 local/path/home/Ramses/CrackAndRootAll.js create mode 100644 local/path/home/Ramses/RamsesUtils.js create mode 100644 local/path/home/Ramses/S4controller.js create mode 100644 local/path/home/Ramses/S4logHelper.js create mode 100644 local/path/home/Ramses/S4tGrow.js create mode 100644 local/path/home/Ramses/S4tHack.js create mode 100644 local/path/home/Ramses/S4tWeaken.js create mode 100644 local/path/home/Ramses/S4utils.js create mode 100644 local/path/home/Ramses/Serverlist.js create mode 100644 local/path/home/Ramses/analyzeContract.js create mode 100644 local/path/home/Ramses/corp/Autosell.js create mode 100644 
local/path/home/Ramses/corp/HireWorkers.js create mode 100644 local/path/home/Ramses/corp/SetupExport.js create mode 100644 local/path/home/Ramses/corp/Smart.js create mode 100644 local/path/home/Ramses/corp/UpgradeOffice.js create mode 100644 local/path/home/Ramses/killAllScript.js create mode 100644 local/path/home/Ramses/purchaseServers.js diff --git a/local/path/home/Ramses/Backdoor.js b/local/path/home/Ramses/Backdoor.js new file mode 100644 index 0000000..77ff641 --- /dev/null +++ b/local/path/home/Ramses/Backdoor.js @@ -0,0 +1,97 @@ +/** @param {NS} ns */ +export async function main(ns, allServers) { + ns.tail(); + await scanRecursiveWrapper(ns); + let currentHackingLevel = ns.getHackingLevel(); + let currentArray = []; + let currentHop = ""; + let serverRoutes = JSON.parse(ns.read("ServerRouteList.txt")); + let allPaths = getPaths(serverRoutes); + let checkAll = ns.args[0]; + for (const entry of allPaths) { + for (const name of entry) { + if (ns.singularity.connect(name) === false) { + ns.tprint("Error when trying to connect to: " + currentHop); + return + } + + if (ns.getServer(name).hostname === "CSEC" || ns.getServer(name).hostname === "avmnite-02h" || ns.getServer(name).hostname === "I.I.I.I" || ns.getServer(name).hostname === "run4theh111z" || ns.getServer(name).hostname === "The-Cave" || checkAll === true ) { + if (!ns.getServer(name).backdoorInstalled) { + if (ns.getServerRequiredHackingLevel(name) < currentHackingLevel && ns.hasRootAccess(name) === true) { + ns.print("Trying to backdoor " + name) + await ns.singularity.installBackdoor(name); + ns.print("Success on " + name) + } + } else { continue } + } + } + } + ns.singularity.connect("home"); +} + +function getPaths(obj, path = []) { + const paths = []; + for (const key in obj) { + const newPath = [...path, key]; + paths.push(newPath); + if (typeof obj[key] === 'object' && obj[key] !== null) { + paths.push(...getPaths(obj[key], newPath)); + } + } + return paths; +} + +/** @param {NS} ns */ +async 
function scanRecursiveWrapper(ns) { + ns.rm("ServerRouteList.txt"); + const home = "home"; + let serverRouteList = { home: {} }; + let knownServers = []; + let unscanned = []; + unscanned.push(home); + knownServers.push(home); + while (unscanned.length > 0) { + let currentServer = unscanned.pop(); + let currentChildren = ns.scan(currentServer).filter(element => !knownServers.includes(element)); + knownServers = knownServers.concat(currentChildren); + let keyPath = findKeyPath(serverRouteList, currentServer); + let childrenObject = currentChildren.reduce((a, v) => ({ ...a, [v]: {} }), {}); + writeValueToPath(serverRouteList, keyPath, childrenObject); + for (let i = 0; i < currentChildren.length; i++) { + let child = currentChildren[i]; + unscanned.push(child); + } + } + ns.write("ServerRouteList.txt", JSON.stringify(serverRouteList), "w"); +} + +function findKeyPath(json, key) { + if (typeof json !== 'object' || json === null) { + return null; + } + if (key in json) { + return key; + } + for (const property in json) { + if (json.hasOwnProperty(property)) { + const path = findKeyPath(json[property], key); + if (path !== null) { + return property + '*' + path; + } + } + } + return null; +} + +function writeValueToPath(json, path, value) { + const parts = path.split('*'); + let currentObject = json; + for (let i = 0; i < parts.length - 1; i++) { + const part = parts[i]; + if (currentObject[part] === undefined) { + currentObject[part] = {}; + } + currentObject = currentObject[part]; + } + currentObject[parts[parts.length - 1]] = value; +} diff --git a/local/path/home/Ramses/ContractSolver.js b/local/path/home/Ramses/ContractSolver.js new file mode 100644 index 0000000..75087d4 --- /dev/null +++ b/local/path/home/Ramses/ContractSolver.js @@ -0,0 +1,161 @@ +/** @param {NS} ns */ +export async function main(ns) { + ns.tprint(ns.codingcontract.getContractTypes()) + let testcontract = ns.codingcontract.createDummyContract("Total Ways to Sum") + let contractType = 
ns.codingcontract.getContractType(testcontract); + ns.tprint(ns.codingcontract.getDescription(testcontract)) + let n = ns.codingcontract.getData(testcontract); + ns.tprint("Data: " + n); + let answer = ""; + if (contractType === "Find Largest Prime Factor") { + answer = largestPrimeFactor(n); + } + if (contractType === "Subarray with Maximum Sum") { + answer = SubarrayWithMaximumSum(ns, n) + } + if (contractType === "Total Ways to Sum") { + answer = TotalWaysToSum(ns, n) + } + + + ns.tprint(answer); + + ns.tprint(ns.codingcontract.attempt(answer, testcontract)); +} +/* +5: +4 1 +3 2 +3 1 1 +2 2 1 +2 1 1 1 +1 1 1 1 1 + +6: +5 1 +4 2 +4 1 1 +3 3 +3 2 1 +3 1 1 1 +2 2 2 +2 2 1 1 +2 1 1 1 1 +1 1 1 1 1 1 + +# Start with one position m filling it with the integers between 1 and target +# For each m, fill the next position n with integers between 1 and m +# Repeat as long as the sum is smaller than target. +# append all iterations to the Array and count +*/ + +function TotalWaysToSum(ns, target) { + let sumArray = []; + let inputArray = []; + let unfinishedArray = []; + let rollingSum = 0; + for (let i = 1; i < target; i++) { + inputArray.push([i]); + } + let z = 1 + while (inputArray.length > 0) { + z++ + inputArray.forEach((element) => { + rollingSum = element.reduce((a, b) => a + b, 0); + if (rollingSum === target) { + sumArray.push(element) + } else { + + for (let k = 1; k <= element[element.length-1] && k <= target - rollingSum; k++) { + + unfinishedArray.push(element.concat([k])) + } + } + } + ) + inputArray = unfinishedArray; + } + ns.tprint("Target: " +target) + ns.tprint("Length: " + sumArray.length) + return sumArray.length +} + + + + + + + + +function SubarrayWithMaximumSum(ns, givenArray) { + let arrayLength = givenArray.length; + let maxSum = -10000; + let runningSum = 0; + for (let i = 1; i <= arrayLength; i++) { + for (let j = 0; j <= arrayLength - i; j++) { + runningSum = eval(givenArray.slice(j, i + j).join('+')); + //ns.tprint("i: "+i+ " j: "+ j + " 
Array: "+givenArray.slice(j,i+j)+ " eval: "+ givenArray.slice(j,i+j).join('+')+"runningSum: "+runningSum); + if (maxSum < runningSum) { maxSum = runningSum }; + } + } + return maxSum +} + + +function FindLargestPrimeFactor(number) { + + let factor = 2; + while (factor * factor <= number) { + if (number % factor === 0) { + number /= factor; + } else { + factor++ + } + } + return number; +} + +/* +function FindLargestPrimeFactor(n) { + let x = Math.ceil(Math.random()*10); + let y = x; + let d = 1; + + while (d === 1) { + x = g(x, n); + y = g(g(y, n), n) + d = gcd(n, Math.abs(x - y)) + //ns.tprint("x:" + x + " y: " + y + " d: " + d) + } + if (d === n) { + return ("failure") + } + else { + return (d) + } +} + +function g(x, n) { + return (x * x) % n +} + +function gcd(a,b) { + a = Math.abs(a); + b = Math.abs(b); + if (b > a) {var temp = a; a = b; b = temp;} + while (true) { + if (b == 0) return a; + a %= b; + if (a == 0) return b; + b %= a; + } +} + + +function gcd(a, b) { + if (!b) { + return a; + } + return gcd(b, a % b); +} +*/ \ No newline at end of file diff --git a/local/path/home/Ramses/CorpControl.js b/local/path/home/Ramses/CorpControl.js new file mode 100644 index 0000000..4ed2131 --- /dev/null +++ b/local/path/home/Ramses/CorpControl.js @@ -0,0 +1,14 @@ +/** @param {NS} ns */ +export async function main(ns) { + let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"]; + let corpName = "AgraNeo"; + //ns.tprint(ns.corporation.getConstants()) + //ns.corporation.getMaterial(); + //ns.corporation.buyMaterial(); + for (let city of cities) { + await ns.run("/corp/Smart.js",1,corpName,city); + await ns.tprint(ns.run("/corp/UpgradeOffice.js",1,corpName,city)); + await ns.sleep(1000) + } + +} \ No newline at end of file diff --git a/local/path/home/Ramses/CrackAndRootAll.js b/local/path/home/Ramses/CrackAndRootAll.js new file mode 100644 index 0000000..719803d --- /dev/null +++ b/local/path/home/Ramses/CrackAndRootAll.js @@ -0,0 +1,8 @@ + 
+import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript } from "/RamsesUtils.js"; +/** @param {NS} ns */ +export async function main(ns) { + let cracks = {}; + cracks = getCracks(ns); + crackingAndRooting(ns, cracks, "", false); +} \ No newline at end of file diff --git a/local/path/home/Ramses/RamsesUtils.js b/local/path/home/Ramses/RamsesUtils.js new file mode 100644 index 0000000..24d0ef4 --- /dev/null +++ b/local/path/home/Ramses/RamsesUtils.js @@ -0,0 +1,278 @@ +/** @param {NS} ns */ +export async function main(ns) { + ns.tprint("This is just a function library, it doesn't do anything."); +} + +/** @param {NS} ns */ +export function getCracks(ns) { + let cracks = {}; + if (ns.fileExists("BruteSSH.exe", "home")) { + cracks["BruteSSH.exe"] = ns.brutessh; + }; + if (ns.fileExists("FTPCrack.exe", "home")) { + cracks["FTPCrack.exe"] = ns.ftpcrack; + }; + if (ns.fileExists("relaySMTP.exe", "home")) { + cracks["relaySMTP.exe"] = ns.relaysmtp; + }; + if (ns.fileExists("HTTPWorm.exe", "home")) { + cracks["HTTPWorm.exe"] = ns.httpworm; + }; + if (ns.fileExists("SQLInject.exe", "home")) { + cracks["SQLInject.exe"] = ns.sqlinject; + }; + return cracks; +} + +/** @param {NS} ns */ +export function scanServerList(ns) { + const home = "home"; + let serverList = {}; + let unscanned = []; + unscanned.push(home); + while (unscanned.length > 0) { + let currentServer = unscanned.pop(); + if (!serverList[currentServer]) { + let maxRam = ns.getServerMaxRam(currentServer); + let minPorts = ns.getServerNumPortsRequired(currentServer); + let minSecLevel = ns.getServerMinSecurityLevel(currentServer); + let minHackLevel = ns.getServerRequiredHackingLevel(currentServer); + let rootAccess = ns.hasRootAccess(currentServer); + let serverMoney = ns.getServerMaxMoney(currentServer); + let serverFiles = ns.ls(currentServer); + let skillFactor = (2.5 * minHackLevel * minSecLevel + 500) / (ns.getHackingLevel() + 50); + let compareTimeFactor = serverMoney / 
skillFactor / 10e7; + serverList[currentServer] = + { + serverName: currentServer, + maxRam: maxRam, + maxMoney: serverMoney, + minSec: minSecLevel, + minPorts: minPorts, + minHackLvl: minHackLevel, + rootAccess: rootAccess, + factorMoneyPerTime: compareTimeFactor, + openPorts: 0, + serverFiles: serverFiles, + }; + let neighbours = ns.scan(currentServer); + for (let i = 0; i < neighbours.length; i++) { + let neighbour = neighbours[i]; + if (serverList[neighbour]) { + continue + } + unscanned.push(neighbour); + } + } + } + ns.write("serverList.txt", JSON.stringify(serverList), "w"); +} + +/** @param {NS} ns */ +export function findBestTarget(ns, maxSec, maxPorts, currentHackLevel, manualTargetOverride) { + if (!ns.fileExists("serverList.txt", "home")) scanServerList(); + let serverList = JSON.parse(ns.read("serverList.txt")); + let bestEntry = null; + let compareTime = 0; + for (const [name, entry] of Object.entries(serverList)) { + if (entry.minSec <= maxSec && entry.minPorts <= maxPorts && entry.minHackLvl < currentHackLevel) { + if (entry.factorMoneyPerTime > compareTime) { + compareTime = entry.factorMoneyPerTime; + bestEntry = name; + } + } + } + if (manualTargetOverride.length > 0) { + bestEntry = manualTargetOverride; + } + ns.write("bestTarget.txt", JSON.stringify(serverList[bestEntry]), "w"); +} + + +/** @param {NS} ns */ +export function crackingAndRooting(ns, cracks, funnyScript, copy) { + if (!ns.fileExists("serverList.txt", "home")) scanServerList(); + let serverList = JSON.parse(ns.read("serverList.txt")); + for (const [name, entry] of Object.entries(serverList)) { + let cracked = false; + let openPorts = serverList[name].openPorts || 0; + if (entry.minPorts === 0 || (entry.minPorts > openPorts && entry.minPorts <= Object.keys(cracks).length)) { + for (let k = 0; k < entry.minPorts; k++) { + cracks[Object.keys(cracks)[k]](name); + serverList[name].openPorts = k; + } + cracked = true; + } + if (!ns.hasRootAccess(name) && cracked === true) { + 
ns.nuke(name); + if (ns.hasRootAccess(name)) { + serverList[name].rootAccess = true; + if (serverList[name].maxRam > 0 && copy === true) { + copyAndRunScript(ns, funnyScript, name); + } + } + } + ns.write("serverList.txt", JSON.stringify(serverList), "w"); + } + ns.tprint("Cracking and rooting done"); +} + + +/** @param {NS} ns */ +export function copyAndRunScript(ns, funnyScript, currentServer) { + // change to run for one specific server with bestTarget from file + //let minRam = ns.getScriptRam(funnyScript); + let bestTarget = JSON.parse(ns.read("bestTarget.txt")); + + let name = currentServer; + let serverList = JSON.parse(ns.read("serverList.txt")); + ns.print(name); + if (serverList[name].rootAccess === true && serverList[bestTarget.serverName].rootAccess === true) { + if (name !== "home") { + ns.print("killed threads on: " + name + ns.killall(name, true)); + } else { + ns.print("killed threads on: " + name + ns.scriptKill(funnyScript[0], name)); + }; + //move script and run + if (serverList[name].maxRam > 0) { + ns.scp(funnyScript, name, "home"); + let maxProcesses = 1; + if (serverList[name].maxRam >= 8) { + maxProcesses = Math.max(Math.floor((serverList[name].maxRam) / 8), 1); + } else { + maxProcesses = 1 + }; + + for (let n = 1; n <= maxProcesses; n++) { + ns.exec(funnyScript[0], name, 1, bestTarget.serverName); + } + + /*let maxThreads = 0; + if (name === "home") { + maxThreads = Math.floor((serverList[name].maxRam - ns.getServerUsedRam(name) - 32) / minRam); + ns.print(name + " " + maxThreads); + } else { + ns.print(name); + maxThreads = Math.floor(serverList[name].maxRam / minRam); + ns.print(name + " " + maxThreads); + }; + while (maxThreads > 0) { + let threadsToAssign = maxThreads < 500 ? 
maxThreads : 500; + if (ns.exec(funnyScript, name, threadsToAssign, bestTarget.serverName, serverList[bestTarget.serverName].minSec, serverList[bestTarget.serverName].maxMoney, JSON.stringify(serverList[bestTarget.serverName])) !== 0) { + ns.print("Executing script on: " + name + " with: " + threadsToAssign + " threads out of " + maxThreads + " total threads"); + maxThreads = maxThreads - threadsToAssign; + } else { + ns.tprint("Error running script on: " + name); + maxThreads = -1; + }; + }*/ + } + } +} + +/** @param {NS} ns */ +export async function purchaseAndUpgradeServers(ns) { + ns.disableLog("sleep"); + ns.disableLog("getServerMoneyAvailable"); + ns.disableLog("getServerMaxRam"); + let maxPurchasedServers = ns.getPurchasedServerLimit(); + let purchasedServers = []; + let count = listPurchasedServers(ns).length; + let currentMoney = 0; + let serverList = {}; + while (count < maxPurchasedServers) { + purchasedServers = listPurchasedServers(ns); + currentMoney = ns.getServerMoneyAvailable("home"); + let targetRamInitial = 16; + if (ns.getPurchasedServerCost(targetRamInitial) < currentMoney) { + let hostname = ns.purchaseServer("pserv-" + purchasedServers.length, 16); + + count = listPurchasedServers(ns).length; + serverList = JSON.parse(ns.read("serverList.txt")); + serverList[hostname] = { + serverName: hostname, + maxRam: 16, + maxMoney: 0, + minSec: 0, + minPorts: 5, + minHackLvl: 1, + rootAccess: true, + factorMoneyPerTime: 99999999, + openPorts: 0, + }; + ns.write("serverList.txt", JSON.stringify(serverList), "w"); + continue + } else { + await ns.sleep(5000); + } + } + + let i = 5; + while (i < 21) { + let targetRam = 2 ** i; + purchasedServers = listPurchasedServers(ns); + for (let currentServer of purchasedServers) { + currentMoney = ns.getServerMoneyAvailable("home"); + + if (ns.getServerMaxRam(currentServer) < targetRam && ns.getPurchasedServerUpgradeCost(currentServer, targetRam) < currentMoney) { + if (ns.upgradePurchasedServer(currentServer, 
targetRam)) { + ns.print(currentServer + " upgraded to " + targetRam + " GB RAM"); + serverList = JSON.parse(ns.read("serverList.txt")); + serverList[currentServer].maxRam = targetRam; + ns.write("serverList.txt", JSON.stringify(serverList), "w"); + } + } else { + await ns.sleep(5000); + continue + }; + } + ++i; + } + ns.tprint("Extiting purchaseServers script!") +} + +/** @param {NS} ns */ +function listPurchasedServers(ns) { + return ns.getPurchasedServers(); +} + +/** @param {NS} ns */ +export async function runControllerOnPserv(ns) { + let purchasedServers = listPurchasedServers(ns); + let nPID = 0; + nPID = ns.exec("S2controller.js", "home"); + ns.tprint("Started S2controller.js on " + "home" + " with PID " + nPID) + for (let currentServer of purchasedServers) { + ns.scp(["S2tGrow.js", "S2tWeaken.js", "S2tHack.js", "S2controller.js", "S2utils.js"], currentServer, "home"); + nPID = ns.exec("S2controller.js", currentServer); + if (nPID > 0) { + ns.tprint("Started S2controller.js on " + currentServer + " with PID " + nPID) + } + } +} + +/** @param {NS} ns */ +export async function backdoor(ns) { + let serverList = JSON.parse(ns.read("serverList.txt")); + let lasthackingLevel = 0; + let currentHackingLevel = 0; + while (true) { + currentHackingLevel = ns.getHackingLevel(); + if (currentHackingLevel > lasthackingLevel) { + lasthackingLevel = currentHackingLevel; + for (const [name, entry] of Object.entries(serverList)) { + if (entry.minHackLvl <= lasthackingLevel && entry.hasBackdoor !== true) { + ns.singularity.connect(name); + await ns.singularity.installBackdoor(); + ns.singularity.connect("home"); + serverList[name].hasBackdoor = true; + ns.tprint("Backdoor on: " + name); + } + } + ns.write("serverList.txt", JSON.stringify(serverList), "w"); + } else { + await ns.sleep(30000) + }; + + } +} diff --git a/local/path/home/Ramses/S4controller.js b/local/path/home/Ramses/S4controller.js new file mode 100644 index 0000000..46bd6ae --- /dev/null +++ 
b/local/path/home/Ramses/S4controller.js @@ -0,0 +1,491 @@ +/* +Welcome to part 4. A continuous batcher is a major hurdle compared to everything we've done so far. The number +and complexity of the challenges increases drastically when trying to keep everything running indefinitely. +With luck, the overengineering we've done so far will have well prepared us for the challenges of a periodic +batcher. + +Technically, I use quite a few JIT techniques in this batcher, but I don't consider it a true JIT batcher +as it doesn't take full advantage of the potential RAM efficiency. Instead, I favor simpler logic, while still +allowing the batcher to make certain adjustments if it needs to. + +When it comes to continuous batchers, performance is king. We're going to aim for 5ms spacing as we have +throughout this guide so far, but there's a lot we need to do in those 5ms. As such, we need to make sure that +we choose which operations to do carefully, as well as when to do them and how to make sure they are as fast +as we can make them. +*/ + +// One new utility. A custom data structure for managing our schedule. You can see the details in utils.js +import { getServers, copyScripts, checkTarget, isPrepped, prep, Deque } from "/S4utils.js"; + +const TYPES = ["hack", "weaken1", "grow", "weaken2"]; +const WORKERS = ["S4tHack.js", "S4tWeaken.js", "S4tGrow.js"]; +const SCRIPTS = { hack: "S4tHack.js", weaken1: "S4tWeaken.js", grow: "S4tGrow.js", weaken2: "S4tWeaken.js" }; +const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 }; +// const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 }; + +// A new optional constant. The RamNet will attempt to reserve this much ram at home. +// You can set it to 0 if you don't want to reserve anything, and setting it too high will just reserve as much as possible. +const RESERVED_HOME_RAM = 0; + +// A brand new class to help keep our increasingly complex logic organized. +class ContinuousBatcher { + #ns; // The ns object. 
Stored as a class variable to save me the trouble of passing it all the time. + + // The usual data we've grown familiar with by now. + #metrics; + #ramNet; + #target; + #schedule; + #dataPort; + #batchCount = 0; + #desyncs = 0; // This is mostly used for logging purposes, since the batcher is self-correcting. + + // A capital M Map. We'll use this to keep track of active jobs. + #running = new Map(); + + constructor(ns, metrics, ramNet) { + this.#ns = ns; + this.#metrics = metrics; + this.#ramNet = ramNet; + this.#target = metrics.target; + this.#dataPort = ns.getPortHandle(ns.pid); + + // Seeding the first ending time. + this.#metrics.end = Date.now() + metrics.wTime - metrics.spacer; + + // The new schedule I promised. It's a double-ended queue, but we'll mostly just be using it as a normal queue. + // It has a static size, so we make sure it can accomodate all of our jobs. + this.#schedule = new Deque(metrics.depth * 4); + } + + // This is a function that can schedule a given number of batches. + // With no arguments, it just fills up the queue. + scheduleBatches(batches = this.#metrics.depth) { + while (this.#schedule.size < batches * 4) { + ++this.#batchCount; + for (const type of TYPES) { + this.#metrics.end += this.#metrics.spacer; + const job = new Job(type, this.#metrics, this.#batchCount); + + /* + We don't actually error out if a job can't be assigned anymore. Instead, we just assign as much + as we can. If it desyncs, the logic will correct it, and if a weaken2 gets cancelled then the actual + depth will naturally decrease below the target depth. Not a perfect fix, but better than breaking. + */ + if (!this.#ramNet.assign(job)) { + this.#ns.tprint(`WARN: Insufficient RAM to assign ${job.type}: ${job.batch}.`); + continue; + } + this.#schedule.push(job); + } + } + } + + // The function for deploying jobs. Very similar to the code from our shotgun batcher with some minor changes. 
+ async deploy() { + // The for loop is replaced by a while loop, since our Deque isn't iterable. + while (!this.#schedule.isEmpty()) { + const job = this.#schedule.shift(); + job.end += this.#metrics.delay; + const jobPid = this.#ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job)); + if (!jobPid) throw new Error(`Unable to deploy ${job.type}`); + const tPort = this.#ns.getPortHandle(jobPid); + + // We save the pid for later. + job.pid = jobPid; + await tPort.nextWrite(); + + // Jobs can be late as long as the delay won't cause collisions. + this.#metrics.delay += Math.max(Math.ceil(tPort.read()) - this.#metrics.spacer, 0); + this.#running.set(job.id, job); + } + + // After the loop, we adjust future job ends to account for the delay, then discard it. + this.#metrics.end += this.#metrics.delay; + this.#metrics.delay = 0; + } + + // Our old timeout function is now a proper function of its own. A few extra baubles in the log, but nothing exciting. + /** @param {NS} ns */ + log() { + const ns = this.#ns; + const metrics = this.#metrics; + const ramNet = this.#ramNet; + ns.clearLog(); + ns.print(`Hacking ~\$${ns.formatNumber(metrics.maxMoney * metrics.greed * metrics.chance / (4 * metrics.spacer) * 1000)}/s from ${metrics.target}`); + ns.print(`Status: ${isPrepped(ns, this.#target) ? "Prepped" : "Desynced"}`); + ns.print(`Security: +${metrics.minSec - metrics.sec}`); + ns.print(`Money: \$${ns.formatNumber(metrics.money, 2)}/${ns.formatNumber(metrics.maxMoney, 2)}`); + ns.print(`Greed: ${Math.floor(metrics.greed * 1000) / 10}%`); + ns.print(`Ram available: ${ns.formatRam(ramNet.totalRam)}/${ns.formatRam(ramNet.maxRam)}`); + ns.print(`Active jobs: ${this.#running.size}/${metrics.depth * 4}`); + + // You'll see what this line's about in a moment. + if (this.#desyncs) ns.print(`Hacks cancelled by desync: ${this.#desyncs}`); + } + + // The core loop of our batcher logic. 
Quite lean with everything neatly divided into functions, but there's still + // plenty going on here. + async run() { + // First we do some initial setup, this is essentially firing off a shotgun blast to get us started. + const dataPort = this.#dataPort; + this.scheduleBatches(); + await this.deploy(); + await this.#ns.sleep(0); // This is probably pointless. I forget why I put it here. + this.log(); + while (true) { + // Wait for the nextWrite, as usual. + await dataPort.nextWrite(); + + // Sometimes there's a delay and more than one job writes to the port at once. + // We make sure to handle it all before we move on. + while (!dataPort.empty()) { + // Workers now report unique identifiers (type + batchnumber) used to find them on the map. + const data = dataPort.read(); + + // Free up the ram, them remove them from the active list. + // The check handles a corner case where a hack gets "cancelled" after it's already finished. + if (this.#running.has(data)) { + this.#ramNet.finish(this.#running.get(data)); + this.#running.delete(data); + } + + // If it's a W2, we've got an opening to do some work. + if (data.startsWith("weaken2")) { + // Recalculate times. Threads too, but only if prepped (the logic is in the function itself). + this.#metrics.calculate(this.#ns); + + /* + This is probably the most JIT-like aspect of the entire batcher. If the server isn't prepped, then + we cancel the next hack to let the server fix itself. Between this and the extra 1% grow threads, level + ups are completely handled. Rapid level ups can lead to a lot of lost jobs, but eventually the program + stabilizes. + + There are probably more efficient ways to do this. Heck, even this solution could be optimized better, + but for now, this is an adequate demonstration of a reasonable non-formulas solution to the level up + problem. It also lets us dip our toes into JIT logic in preparation for the final part. 
+ */ + if (!isPrepped(this.#ns, this.#target)) { + const id = "hack" + (parseInt(data.slice(7)) + 1); + const cancel = this.#running.get(id); + // Just in case the hack was already aborted somehow. + if (cancel) { + this.#ramNet.finish(cancel); + this.#ns.kill(cancel.pid); + this.#running.delete(id); + ++this.#desyncs; // Just to keep track of how much we've lost keeping things prepped. + } + } + + // Then of course we just schedule and deploy a new batch. + this.scheduleBatches(1); + await this.deploy(); + this.log(); + } + } + } + } +} + +/* + Our poor "main" function isn't much more than a kickstart for our new batcher object. It's a bit weird having + it wedged between objects like this, but I wanted to have the new functionality up at the top since most of the + remaining code hasn't changed much. I'll comment the changes anyway. +*/ +/** @param {NS} ns */ +export async function main(ns) { + ns.disableLog("ALL"); + ns.tail(); + + /* + This commented out code is for a debugging tool that centralizes logs from the worker scripts into one place. + It's main advantage is the ability to write txt logs to file, which can be perused later to track down errors. + You can uncomment it if you'd like to see a live stream of workers finishing without flooding the terminal. + + If you do, make sure to search the file for -LOGGING and uncomment all relevant lines. + */ + // if (ns.isRunning("S4logHelper.js", "home")) ns.kill("S4logHelper.js", "home"); + // const logPort = ns.exec("S4logHelper.js", "home"); + // ns.atExit(() => ns.closeTail(logPort)); + + // Setup is mostly the same. + const dataPort = ns.getPortHandle(ns.pid); + dataPort.clear(); + let target = ns.args[0] ? 
ns.args[0] : "n00dles"; + while (true) { + const servers = getServers(ns, (server) => { + if (!ns.args[0]) target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home")); + copyScripts(ns, server, WORKERS, true); + return ns.hasRootAccess(server); + }); + const ramNet = new RamNet(ns, servers); + const metrics = new Metrics(ns, target); + // metrics.log = logPort; // Uncomment for -LOGGING. + if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet); + ns.clearLog(); + ns.print("Optimizing. This may take a few seconds..."); + + // Optimizer has changed again. Back to being synchronous, since the performance is much better. + optimizePeriodic(ns, metrics, ramNet); + metrics.calculate(ns); + + // Create and run our batcher. + const batcher = new ContinuousBatcher(ns, metrics, ramNet); + await batcher.run(); + + /* + You might be wondering why I put this in a while loop and then just return here. The simple answer is that + it's because this is meant to be run in a loop, but I didn't implement the logic for it. This version of the + batcher is completely static once created. It sticks to a single greed value, and doesn't update if more + RAM becomes available. + + In a future version, you'd want some logic to allow the batcher to choose new targets, update its available RAM, + and create new batchers during runtime. For now, that's outside the scope of this guide, but consider this loop + as a sign of what could be. + */ + return; + } +} + +// The Job class, lean as it is, remains mostly unchanged. I got rid of the server argument since I wasn't using it +// and added a batch number instead. 
+class Job { + constructor(type, metrics, batch) { + this.type = type; + // this.end = metrics.ends[type]; + this.end = metrics.end; + this.time = metrics.times[type]; + this.target = metrics.target; + this.threads = metrics.threads[type]; + this.cost = this.threads * COSTS[type]; + this.server = "none"; + this.report = true; + this.port = metrics.port; + this.batch = batch; + + // The future is now. The status and id are used for interacting with the Deque and Map in our batcher class. + this.status = "active"; + this.id = type + batch; + // this.log = metrics.log; // -LOGGING + } +} + +// The only change to the metrics class is the calculate function skipping threadcounts if the server isn't prepped. +/** @param {NS} ns */ +class Metrics { + constructor(ns, server) { + this.target = server; + this.maxMoney = ns.getServerMaxMoney(server); + this.money = Math.max(ns.getServerMoneyAvailable(server), 1); + this.minSec = ns.getServerMinSecurityLevel(server); + this.sec = ns.getServerSecurityLevel(server); + this.prepped = isPrepped(ns, server); + this.chance = 0; + this.wTime = 0; + this.delay = 0; + this.spacer = 5; + this.greed = 0.01; + this.depth = 0; // The number of concurrent batches to run. Set by the optimizer. + + this.times = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 }; + this.end = 0; // Slight change for the new timing. The old way in commented out in case I switch back later. 
+ // this.ends = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 }; + this.threads = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 }; + + this.port = ns.pid; + } + + calculate(ns, greed = this.greed) { + const server = this.target; + const maxMoney = this.maxMoney; + this.money = ns.getServerMoneyAvailable(server); + this.sec = ns.getServerSecurityLevel(server); + this.wTime = ns.getWeakenTime(server); + this.times.weaken1 = this.wTime; + this.times.weaken2 = this.wTime; + this.times.hack = this.wTime / 4; + this.times.grow = this.wTime * 0.8; + // this.depth = this.wTime / this.spacer * 4; + + if (isPrepped(ns, server)) { // The only change. + const hPercent = ns.hackAnalyze(server); + const amount = maxMoney * greed; + const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(server, amount)), 1); + const tGreed = hPercent * hThreads; + const gThreads = Math.ceil(ns.growthAnalyze(server, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01); + this.threads.weaken1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1); + this.threads.weaken2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1); + this.threads.hack = hThreads; + this.threads.grow = gThreads; + this.chance = ns.hackAnalyzeChance(server); + } + } +} + +// A few minor edits here. An unused "simulation" mode, and reserved RAM on home. +/** @param {NS} ns */ +class RamNet { + #blocks = []; + #minBlockSize = Infinity; + #maxBlockSize = 0; + #totalRam = 0; + #prepThreads = 0; + #maxRam = 0; + #index = new Map(); + + // Simulate mode ignores running scripts. Can be used to make calculations while the batcher is operating. + constructor(ns, servers, simulate = false) { + for (const server of servers) { + if (ns.hasRootAccess(server)) { + const maxRam = ns.getServerMaxRam(server); + // Save some extra ram on home. Clamp used ram to maxRam to prevent negative numbers. + const reserved = (server === "home") ? RESERVED_HOME_RAM : 0; + const used = Math.min((simulate ? 
0 : ns.getServerUsedRam(server)) + reserved, maxRam); + const ram = maxRam - used; + if (maxRam > 0) { + const block = { server: server, ram: ram }; + this.#blocks.push(block); + if (ram < this.#minBlockSize) this.#minBlockSize = ram; + if (ram > this.#maxBlockSize) this.#maxBlockSize = ram; + this.#totalRam += ram; + this.#maxRam += maxRam; + this.#prepThreads += Math.floor(ram / 1.75); + } + } + } + this.#sort(); + this.#blocks.forEach((block, index) => this.#index.set(block.server, index)); + } + + #sort() { + this.#blocks.sort((x, y) => { + if (x.server === "home") return 1; + if (y.server === "home") return -1; + + return x.ram - y.ram; + }); + } + + get totalRam() { + return this.#totalRam; + } + + get maxRam() { + return this.#maxRam; + } + + get maxBlockSize() { + return this.#maxBlockSize; + } + + get prepThreads() { + return this.#prepThreads; + } + + getBlock(server) { + if (this.#index.has(server)) { + return this.#blocks[this.#index.get(server)]; + } else { + throw new Error(`Server ${server} not found in RamNet.`); + } + } + + assign(job) { + const block = this.#blocks.find(block => block.ram >= job.cost); + if (block) { + job.server = block.server; + block.ram -= job.cost; + this.#totalRam -= job.cost; + return true; + } else return false; + } + + finish(job) { + const block = this.getBlock(job.server); + block.ram += job.cost; + this.#totalRam += job.cost; + } + + cloneBlocks() { + return this.#blocks.map(block => ({ ...block })); + } + + printBlocks(ns) { + for (const block of this.#blocks) ns.print(block); + } + + testThreads(threadCosts) { + const pRam = this.cloneBlocks(); + let batches = 0; + let found = true; + while (found) { + for (const cost of threadCosts) { + found = false; + const block = pRam.find(block => block.ram >= cost); + if (block) { + block.ram -= cost; + found = true; + } else break; + } + if (found) batches++; + } + return batches; + } +} + +// Quite a bit has changed in this one. 
It's back to being synchronous, though it can still take a while. +/** + * @param {NS} ns + * @param {Metrics} metrics + * @param {RamNet} ramNet + */ +function optimizePeriodic(ns, metrics, ramNet) { + const maxThreads = ramNet.maxBlockSize / 1.75; + const maxMoney = metrics.maxMoney; + const hPercent = ns.hackAnalyze(metrics.target); + const wTime = ns.getWeakenTime(metrics.target); + + const minGreed = 0.001; + const maxSpacer = wTime; // This is more of an infinite loop safety net than anything. + const stepValue = 0.01; + let greed = 0.95; // Capping greed a bit lower. I don't have a compelling reason for this. + let spacer = metrics.spacer; // We'll be adjusting the spacer in low ram conditions to allow smaller depths. + + while (greed > minGreed && spacer < maxSpacer) { + // We calculate a max depth based on the spacer, then add one as a buffer. + const depth = Math.ceil(wTime / (4 * spacer)) + 1; + const amount = maxMoney * greed; + const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(metrics.target, amount)), 1); + const tGreed = hPercent * hThreads; + const gThreads = Math.ceil(ns.growthAnalyze(metrics.target, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01); + if (Math.max(hThreads, gThreads) <= maxThreads) { + const wThreads1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1); + const wThreads2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1); + + const threadCosts = [hThreads * 1.7, wThreads1 * 1.75, gThreads * 1.75, wThreads2 * 1.75]; + + // Glad I kept these, they turned out to be useful after all. When trying to hit target depth, + // checking that there's actually enough theoretical ram to fit them is a massive boost to performance. + const totalCost = threadCosts.reduce((t, c) => t + c) * depth; + if (totalCost < ramNet.totalRam) { + // Double check that we can actually fit our threads into ram, then set our metrics and return. 
+ const batchCount = ramNet.testThreads(threadCosts); + if (batchCount >= depth) { + metrics.spacer = spacer; + metrics.greed = greed; + metrics.depth = depth; + return + } + } + } + // await ns.sleep(0); // Uncomment and make the function async if you don't like the freeze on startup. + + // Decrement greed until we hit the minimum, then reset and increment spacer. We'll find a valid configuration eventually. + greed -= stepValue; + if (greed < minGreed && spacer < maxSpacer) { + greed = 0.99; + ++spacer; + } + } + throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong."); +} \ No newline at end of file diff --git a/local/path/home/Ramses/S4logHelper.js b/local/path/home/Ramses/S4logHelper.js new file mode 100644 index 0000000..e1e8bdc --- /dev/null +++ b/local/path/home/Ramses/S4logHelper.js @@ -0,0 +1,42 @@ +/* + This script is completely unchanged from the last part. As a note, if you find that saves are taking a very long time + it may help to disable txt logging when you aren't actively debugging. The log files generated by this script + are quite big even when it's erasing the data on each new instance. +*/ + +/** @param {NS} ns */ +export async function main(ns) { + + const logFile = "S4log.txt"; + ns.clear(logFile); // Clear the previous log for each instance. + ns.disableLog("ALL"); + ns.tail(); + ns.moveTail(200, 200); // Move it out of the way so it doesn't cover up the controller. + const logPort = ns.getPortHandle(ns.pid); + logPort.clear(); + + // Pretty simple. Just wait until something writes to the log and save the info. + // Writes to its own console as well as a text file. 
+ let max = 0; + let count = 0; + let total = 0; + let errors = 0; + while (true) { + await logPort.nextWrite(); + do { + const data = logPort.read(); + // if (data > max) max = data; + // if (data > 5) ++errors; + // total += data; + // ++count; + // ns.clearLog(); + // ns.print(`Max desync: ${max}`); + // ns.print(`Average desync: ${total / count}`); + // ns.print(`Errors: ${errors}`); + + // if (data.startsWith("WARN")) ns.print(data); + ns.print(data); + // ns.write(logFile, data); // Comment this line out to disable txt logging. + } while (!logPort.empty()); + } +} \ No newline at end of file diff --git a/local/path/home/Ramses/S4tGrow.js b/local/path/home/Ramses/S4tGrow.js new file mode 100644 index 0000000..ad26af7 --- /dev/null +++ b/local/path/home/Ramses/S4tGrow.js @@ -0,0 +1,30 @@ +/* + A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations + to be as perfect as I can get them. Full comments in weaken.js as usual. +*/ + +/** @param {NS} ns */ +export async function main(ns) { + const start = performance.now(); + const port = ns.getPortHandle(ns.pid); + const job = JSON.parse(ns.args[0]); + let tDelay = 0; + let delay = job.end - job.time - Date.now(); + if (delay < 0) { + ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`); + tDelay = -delay + delay = 0; + } + const promise = ns.grow(job.target, { additionalMsec: delay }); + tDelay += performance.now() - start; + port.write(tDelay); + await promise; + + ns.atExit(() => { + const end = Date.now(); + if (job.report) ns.writePort(job.port, job.type + job.batch); + // Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well. 
+ // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`); + // ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`); + }); +} \ No newline at end of file diff --git a/local/path/home/Ramses/S4tHack.js b/local/path/home/Ramses/S4tHack.js new file mode 100644 index 0000000..a5ad51e --- /dev/null +++ b/local/path/home/Ramses/S4tHack.js @@ -0,0 +1,30 @@ +/* + A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations + to be as perfect as I can get them. Full comments in weaken.js as usual. +*/ + +/** @param {NS} ns */ +export async function main(ns) { + const start = performance.now(); + const port = ns.getPortHandle(ns.pid); + const job = JSON.parse(ns.args[0]); + let tDelay = 0; + let delay = job.end - job.time - Date.now(); + if (delay < 0) { + ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`); + tDelay = -delay + delay = 0; + } + const promise = ns.hack(job.target, { additionalMsec: delay }); + tDelay += performance.now() - start; + port.write(tDelay); + await promise; + + ns.atExit(() => { + const end = Date.now(); + if (job.report) ns.writePort(job.port, job.type + job.batch); + // Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well. 
+ // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`); + // ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`); + }); +} \ No newline at end of file diff --git a/local/path/home/Ramses/S4tWeaken.js b/local/path/home/Ramses/S4tWeaken.js new file mode 100644 index 0000000..b81ac60 --- /dev/null +++ b/local/path/home/Ramses/S4tWeaken.js @@ -0,0 +1,41 @@ +/* + A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations + to be as perfect as I can get them. Full comments in weaken.js as usual. +*/ + +/** @param {NS} ns */ +export async function main(ns) { + const start = performance.now(); + const port = ns.getPortHandle(ns.pid); // We have to define this here. You'll see why in a moment. + const job = JSON.parse(ns.args[0]); + let tDelay = 0; + let delay = job.end - job.time - Date.now(); + + // Don't report delay right away. + if (delay < 0) { + ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`); + tDelay = -delay + delay = 0; + } + + // The actual function call can take some time, so instead of awaiting on it right away, we save the promise for later. + const promise = ns.weaken(job.target, { additionalMsec: delay }); + + // Then after calling the hack function, we calculate our final delay and report it to the controller. + tDelay += performance.now() - start; + + // The ns object is tied up by the promise, so invoking it now would cause a concurrency error. + // That's why we fetched this handle earlier. + port.write(tDelay); + + // Then we finally await the promise. This should give millisecond-accurate predictions for the end time of a job. 
+ await promise; + + ns.atExit(() => { + const end = Date.now(); + if (job.report) ns.writePort(job.port, job.type + job.batch); + // Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well. + // ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`); + // ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`); + }); +} \ No newline at end of file diff --git a/local/path/home/Ramses/S4utils.js b/local/path/home/Ramses/S4utils.js new file mode 100644 index 0000000..e465b19 --- /dev/null +++ b/local/path/home/Ramses/S4utils.js @@ -0,0 +1,420 @@ +/* + We've got a brand new class to look at, but the rest of the file remains unchanged. +*/ + +/** @param {NS} ns */ +export async function main(ns) { + ns.tprint("This is just a function library, it doesn't do anything."); +} + +/* + This is an overengineered abomination of a custom data structure. It is essentially a double-ended queue, + but also has a Map stapled to it, just in case we need to access items by id (we don't.) + + The idea is that it can fetch/peek items from the front or back with O(1) timing. This gets around the issue of + dynamic arrays taking O(n) time to shift, which is terrible behavior for very long queues like the one we're using. +*/ +export class Deque { + #capacity = 0; // The maximum length. + #length = 0; // The actual number of items in the queue + #front = 0; // The index of the "head" where data is read from the queue. + #deleted = 0; // The number of "dead" items in the queue. These occur when items are deleted by index. They are bad. + #elements; // An inner array to store the data. + #index = new Map(); // A hash table to track items by ID. Try not to delete items using this, it's bad. + + // Create a new queue with a specific capacity. 
+ constructor(capacity) { + this.#capacity = capacity; + this.#elements = new Array(capacity); + } + + // You can also convert arrays. + static fromArray(array, overallocation = 0) { + const result = new Deque(array.length + overallocation); + array.forEach(item => result.push(item)); + return result; + } + + // Deleted items don't count towards length, but they still take up space in the array until they can be cleared. + // Seriously, don't use the delete function unless it's absolutely necessary. + get size() { + return this.#length - this.#deleted; + } + + isEmpty() { + return this.#length - this.#deleted === 0; + } + + // Again, "deleted" items still count towards this. Use caution. + isFull() { + return this.#length === this.#capacity; + } + + // The "tail" where data is typically written to. + // Unlike the front, which points at the first piece of data, this points at the first empty slot. + get #back() { + return (this.#front + this.#length) % this.#capacity; + } + + // Push a new element into the queue. + push(value) { + if (this.isFull()) { + throw new Error("The deque is full. You cannot add more items."); + } + this.#elements[this.#back] = value; + this.#index.set(value.id, this.#back); + ++this.#length; + } + + // Pop an item off the back of the queue. + pop() { + while (!this.isEmpty()) { + --this.#length; + const item = this.#elements[this.#back]; + this.#elements[this.#back] = undefined; // Free up the item for garbage collection. + this.#index.delete(item.id); // Don't confuse index.delete() with this.delete() + if (item.status !== "deleted") return item; // Clear any "deleted" items we encounter. + else --this.#deleted; // If you needed another reason to avoid deleting by ID, this breaks the O(1) time complexity. + } + throw new Error("The deque is empty. You cannot delete any items."); + } + + // Shift an item off the front of the queue. This is the main method for accessing data.
+ shift() { + while (!this.isEmpty()) { + // Our pointer already knows exactly where the front of the queue is. This is much faster than the array equivalent. + const item = this.#elements[this.#front]; + this.#elements[this.#front] = undefined; + this.#index.delete(item.id); + + // Move the head up and wrap around if we reach the end of the array. This is essentially a circular buffer. + this.#front = (this.#front + 1) % this.#capacity; + --this.#length; + if (item.status !== "deleted") return item; + else --this.#deleted; + } + throw new Error("The deque is empty. You cannot delete any items."); + } + + // Place an item at the front of the queue. Slightly slower than pushing, but still faster than doing it on an array. + unshift(value) { + if (this.isFull()) { + throw new Error("The deque is full. You cannot add more items."); + } + this.#front = (this.#front - 1 + this.#capacity) % this.#capacity; + this.#elements[this.#front] = value; + this.#index.set(value.id, this.#front); + ++this.#length; + } + + // Peeking at the front is pretty quick, since the head is already looking at it. We just have to clear those pesky "deleted" items first. + peekFront() { + if (this.isEmpty()) { + throw new Error("The deque is empty. You cannot peek."); + } + + while (this.#elements[this.#front].status === "deleted") { + this.#index.delete(this.#elements[this.#front]?.id); + this.#elements[this.#front] = undefined; + this.#front = (this.#front + 1) % this.#capacity; + --this.#deleted; + --this.#length; + + if (this.isEmpty()) { + throw new Error("The deque is empty. You cannot peek."); + } + } + return this.#elements[this.#front]; + } + + // Peeking at the back is ever so slightly slower, since we need to recalculate the pointer. + // It's a tradeoff for the faster push function, and it's a very slight difference either way. + peekBack() { + if (this.isEmpty()) { + throw new Error("The deque is empty. 
You cannot peek."); + } + + let back = (this.#front + this.#length - 1) % this.#capacity; + while (this.#elements[back].status === "deleted") { + this.#index.delete(this.#elements[back].id); + this.#elements[back] = undefined; + back = (back - 1 + this.#capacity) % this.#capacity; + --this.#deleted; + --this.#length; + + if (this.isEmpty()) { + throw new Error("The deque is empty. You cannot peek."); + } + } + + return this.#elements[back]; + } + + // Fill the queue with a single value. + fill(value) { + while (!this.isFull()) { + this.push(value); + } + } + + // Empty the whole queue. + clear() { + while (!this.isEmpty()) { + this.pop(); + } + } + + // Check if an ID exists. + exists(id) { + return this.#index.has(id); + } + + // Fetch an item by ID + get(id) { + let pos = this.#index.get(id); + return pos !== undefined ? this.#elements[pos] : undefined; + } + + // DON'T + delete(id) { + let item = this.get(id); + if (item !== undefined) { + item.status = "deleted"; + ++this.#deleted; + return item; + } else { + throw new Error("Item not found in the deque."); + } + } +} + +// The recursive server navigation algorithm. The lambda predicate determines which servers to add to the final list. +// You can also plug other functions into the lambda to perform other tasks that check all servers at the same time. +/** @param {NS} ns */ +export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) { + if (visited.includes(hostname)) return; + visited.push(hostname); + if (lambdaCondition(hostname)) servers.push(hostname); + const connectedNodes = ns.scan(hostname); + if (hostname !== "home") connectedNodes.shift(); + for (const node of connectedNodes) getServers(ns, lambdaCondition, node, servers, visited); + return servers; +} + +// Here are a couple of my own getServers modules. +// This one finds the best target for hacking. It tries to balance expected return with time taken. 
+/** @param {NS} ns */ +export function checkTarget(ns, server, target = "n00dles", forms = false) { + if (!ns.hasRootAccess(server)) return target; + const player = ns.getPlayer(); + const serverSim = ns.getServer(server); + const pSim = ns.getServer(target); + let previousScore; + let currentScore; + if (serverSim.requiredHackingSkill <= player.skills.hacking / (forms ? 1 : 2)) { + if (forms) { + serverSim.hackDifficulty = serverSim.minDifficulty; + pSim.hackDifficulty = pSim.minDifficulty; + previousScore = pSim.moneyMax / ns.formulas.hacking.weakenTime(pSim, player) * ns.formulas.hacking.hackChance(pSim, player); + currentScore = serverSim.moneyMax / ns.formulas.hacking.weakenTime(serverSim, player) * ns.formulas.hacking.hackChance(serverSim, player); + } else { + const weight = (serv) => { + // Calculate the difference between max and available money + let diff = serv.moneyMax - serv.moneyAvailable; + + // Calculate the scaling factor as the ratio of the difference to the max money + // The constant here is just an adjustment to fine tune the influence of the scaling factor + let scalingFactor = diff / serv.moneyMax * 0.95; + + // Adjust the weight based on the difference, applying the scaling penalty + return (serv.moneyMax / serv.minDifficulty) * (1 - scalingFactor); + } + previousScore = weight(pSim) + currentScore = weight(serverSim) + } + if (currentScore > previousScore) target = server; + } + return target; +} + +// A simple function for copying a list of scripts to a server. +/** @param {NS} ns */ +export function copyScripts(ns, server, scripts, overwrite = false) { + for (const script of scripts) { + if ((!ns.fileExists(script, server) || overwrite) && ns.hasRootAccess(server)) { + ns.scp(script, server); + } + } +} + +// A generic function to check that a given server is prepped. Mostly just a convenience. 
+export function isPrepped(ns, server) { + const tolerance = 0.0001; + const maxMoney = ns.getServerMaxMoney(server); + const money = ns.getServerMoneyAvailable(server); + const minSec = ns.getServerMinSecurityLevel(server); + const sec = ns.getServerSecurityLevel(server); + const secFix = Math.abs(sec - minSec) < tolerance; + return (money === maxMoney && secFix) ? true : false; +} + +/* + This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it. + I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway. + The prep strategy uses a modified proto-batching technique, which will be covered in part 2. +*/ +/** @param {NS} ns */ +export async function prep(ns, values, ramNet) { + const maxMoney = values.maxMoney; + const minSec = values.minSec; + let money = values.money; + let sec = values.sec; + while (!isPrepped(ns, values.target)) { + const wTime = ns.getWeakenTime(values.target); + const gTime = wTime * 0.8; + const dataPort = ns.getPortHandle(ns.pid); + dataPort.clear(); + + const pRam = ramNet.cloneBlocks(); + const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75); + const totalThreads = ramNet.prepThreads; + let wThreads1 = 0; + let wThreads2 = 0; + let gThreads = 0; + let batchCount = 1; + let script, mode; + /* + Modes: + 0: Security only + 1: Money only + 2: One shot + */ + + if (money < maxMoney) { + gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money)); + wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05); + } + if (sec > minSec) { + wThreads1 = Math.ceil((sec - minSec) * 20); + if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) { + gThreads = 0; + wThreads2 = 0; + batchCount = Math.ceil(wThreads1 / totalThreads); + if (batchCount > 1) wThreads1 = totalThreads; + mode = 0; + } else mode = 2; + } else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) { + mode = 1; + const oldG = gThreads; 
+ wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1); + gThreads = Math.floor(wThreads2 * 12.5); + batchCount = Math.ceil(oldG / gThreads); + } else mode = 2; + + // Big buffer here, since all the previous calculations can take a while. One second should be more than enough. + const wEnd1 = Date.now() + wTime + 1000; + const gEnd = wEnd1 + values.spacer; + const wEnd2 = gEnd + values.spacer; + + // "metrics" here is basically a mock Job object. Again, this is just an artifact of repurposed old code. + const metrics = { + batch: "prep", + target: values.target, + type: "none", + time: 0, + end: 0, + port: ns.pid, + log: values.log, + report: false + }; + + // Actually assigning threads. We actually allow grow threads to be spread out in mode 1. + // This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher. + // We're not trying to grow a specific amount, we're trying to grow as much as possible. + for (const block of pRam) { + while (block.ram >= 1.75) { + const bMax = Math.floor(block.ram / 1.75) + let threads = 0; + if (wThreads1 > 0) { + script = "S4tWeaken.js"; + metrics.type = "pWeaken1"; + metrics.time = wTime; + metrics.end = wEnd1; + threads = Math.min(wThreads1, bMax); + if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true; + wThreads1 -= threads; + } else if (wThreads2 > 0) { + script = "S4tWeaken.js"; + metrics.type = "pWeaken2"; + metrics.time = wTime; + metrics.end = wEnd2; + threads = Math.min(wThreads2, bMax); + if (wThreads2 - threads === 0) metrics.report = true; + wThreads2 -= threads; + } else if (gThreads > 0 && mode === 1) { + script = "S4tGrow.js"; + metrics.type = "pGrow"; + metrics.time = gTime; + metrics.end = gEnd; + threads = Math.min(gThreads, bMax); + metrics.report = false; + gThreads -= threads; + } else if (gThreads > 0 && bMax >= gThreads) { + script = "S4tGrow.js"; + metrics.type = "pGrow"; + metrics.time = gTime; + metrics.end = gEnd; + threads = gThreads; + 
metrics.report = false; + gThreads = 0; + } else break; + metrics.server = block.server; + const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics)); + if (!pid) throw new Error("Unable to assign all jobs."); + block.ram -= 1.75 * threads; + } + } + + // Fancy UI stuff to update you on progress. + const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now(); + const timer = setInterval(() => { + ns.clearLog(); + switch (mode) { + case 0: + ns.print(`Weakening security on ${values.target}...`); + break; + case 1: + ns.print(`Maximizing money on ${values.target}...`); + break; + case 2: + ns.print(`Finalizing preparation on ${values.target}...`); + } + ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`); + ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`); + const time = tEnd - Date.now(); + ns.print(`Estimated time remaining: ${ns.tFormat(time)}`); + ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`); + }, 200); + ns.atExit(() => clearInterval(timer)); + + // Wait for the last weaken to finish. + do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken")); + clearInterval(timer); + await ns.sleep(100); + + money = ns.getServerMoneyAvailable(values.target); + sec = ns.getServerSecurityLevel(values.target); + } + return true; +} + +// I don't actually use this anywhere in the code. It's a debugging tool that I use to test the runtimes of functions.
+export function benchmark(lambda) { + let result = 0; + for (let i = 0; i <= 1000; ++i) { + const start = performance.now(); + lambda(i); + result += performance.now() - start; + } + return result / 1000; +} \ No newline at end of file diff --git a/local/path/home/Ramses/Serverlist.js b/local/path/home/Ramses/Serverlist.js new file mode 100644 index 0000000..8b94568 --- /dev/null +++ b/local/path/home/Ramses/Serverlist.js @@ -0,0 +1,42 @@ +import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, purchaseAndUpgradeServers } from "/RamsesUtils.js"; + +/** @param {NS} ns */ +export async function main(ns) { + const funnyScript = ["batch.js", "Ramses-grow.js", "Ramses-weaken.js", "Ramses-hack.js"]; + //write function to purchase scripts from tor network and rerun getCracks() then recrack and reroot + let cracks = {}; + cracks = getCracks(ns); + let maxPorts = Object.keys(cracks).length; + scanServerList(ns); + let manualTargetOverride = ""; + if (ns.getHackingLevel() < 200) { + manualTargetOverride = "n00dles"; + }; + + findBestTarget(ns, 999, maxPorts, ns.getHackingLevel(), manualTargetOverride); + let bestTarget = ns.read("bestTarget.txt") + + ns.tprint("Best Target: " + bestTarget); + ns.tprint(Object.keys(JSON.parse(ns.read("serverList.txt"))).length); + crackingAndRooting(ns, cracks, funnyScript, true); + ns.exec(funnyScript[0], "home", 1, JSON.parse(bestTarget).serverName, 500, true); + let reset = ns.args[0]; + ns.print(reset); + if (reset === true) { + ns.tprint("reset === true") + findBestTarget(ns, 999, maxPorts, ns.getHackingLevel(), manualTargetOverride); + let serverList = JSON.parse(ns.read("serverList.txt")); + for (const [name, entry] of Object.entries(serverList)) { + + copyAndRunScript(ns, funnyScript, name); + } + } + /*let serverListForFiles = JSON.parse(ns.read("serverList.txt")); + for (const [name2, entry2] of Object.entries(serverListForFiles)) { + ns.tprint(name2 + " Files: " + entry2.serverFiles) + }*/ + 
//await ns.sleep(500000); + await purchaseAndUpgradeServers(ns); + +} + diff --git a/local/path/home/Ramses/analyzeContract.js b/local/path/home/Ramses/analyzeContract.js new file mode 100644 index 0000000..2d2b025 --- /dev/null +++ b/local/path/home/Ramses/analyzeContract.js @@ -0,0 +1,21 @@ +/** @param {NS} ns */ +export async function main(ns) { + const sTarget = ns.args[0]; // target server which has the contract + const sContract = ns.args[1]; // target contract file + + //ns.tprint(ns.codingcontract.getContractTypes()); + //ns.codingcontract.createDummyContract(); + + const sContractType = ns.codingcontract.getContractType(sContract, sTarget); + const sContractData = ns.codingcontract.getData(sContract, sTarget); + const sContractDescription = ns.codingcontract.getDescription(sContract, sTarget); + const sContractTries = ns.codingcontract.getNumTriesRemaining(sContract, sTarget); + + + ns.tprint("sContractType = " + sContractType); + ns.tprint("sContractData = " + sContractData); + ns.tprint("sContractDescription = " + sContractDescription); + ns.tprint("sContractTries = " + sContractTries); + + JSON.stringify(sContractType,sContractType, true); +} \ No newline at end of file diff --git a/local/path/home/Ramses/corp/Autosell.js b/local/path/home/Ramses/corp/Autosell.js new file mode 100644 index 0000000..ae30880 --- /dev/null +++ b/local/path/home/Ramses/corp/Autosell.js @@ -0,0 +1,7 @@ +/** @param {NS} ns */ +export async function main(ns) { + let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"]; + let corpName = "AgraNeo"; + ns.tprint(ns.corporation.getMaterial(corpName,cities[0],"Plants")) + ns.corporation.sellMaterial() +} \ No newline at end of file diff --git a/local/path/home/Ramses/corp/HireWorkers.js b/local/path/home/Ramses/corp/HireWorkers.js new file mode 100644 index 0000000..22fefa0 --- /dev/null +++ b/local/path/home/Ramses/corp/HireWorkers.js @@ -0,0 +1,16 @@ +/** @param {NS} ns */ +export async function main(ns) 
{ + let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"]; + let corpName = "ChemNeo"; + let currentSize = 0; + for (let city of cities) { + + let currentOffice=(ns.corporation.getOffice(corpName, city)); + if (currentOffice.numEmployees < currentOffice.size) { + (currentOffice.employeeJobs.Operations < 1) ? ns.corporation.hireEmployee(corpName, city,"Operations") : ""; + (currentOffice.employeeJobs.Engineer < 1) ? ns.corporation.hireEmployee(corpName, city,"Engineer") : ""; + (currentOffice.employeeJobs.Business < 1) ? ns.corporation.hireEmployee(corpName, city,"Business") : ""; + (currentOffice.employeeJobs.Management < 1) ? ns.corporation.hireEmployee(corpName, city,"Management") : ""; + }; + } +} \ No newline at end of file diff --git a/local/path/home/Ramses/corp/SetupExport.js b/local/path/home/Ramses/corp/SetupExport.js new file mode 100644 index 0000000..69c0dd4 --- /dev/null +++ b/local/path/home/Ramses/corp/SetupExport.js @@ -0,0 +1,13 @@ +/** @param {NS} ns */ +export async function main(ns) { + let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"]; + let corpName1 = ["AgraNeo","Plants"]; + let corpName2 = ["ChemNeo","Chemicals"]; + let exportString = "IPROD*-1" + for (let city of cities) { + ns.corporation.cancelExportMaterial(corpName1[0],city,corpName2[0],city,corpName1[1]); + ns.corporation.cancelExportMaterial(corpName2[0],city,corpName1[0],city,corpName2[1]); + ns.corporation.exportMaterial(corpName1[0],city,corpName2[0],city,corpName1[1],exportString); + ns.corporation.exportMaterial(corpName2[0],city,corpName1[0],city,corpName2[1],exportString); + } +} \ No newline at end of file diff --git a/local/path/home/Ramses/corp/Smart.js b/local/path/home/Ramses/corp/Smart.js new file mode 100644 index 0000000..e733958 --- /dev/null +++ b/local/path/home/Ramses/corp/Smart.js @@ -0,0 +1,6 @@ +/** @param {NS} ns */ +export async function main(ns) { + let [corpName, city] = ns.args; + 
//ns.corporation.setSmartSupply(corpName, city, true); + return true; +} \ No newline at end of file diff --git a/local/path/home/Ramses/corp/UpgradeOffice.js b/local/path/home/Ramses/corp/UpgradeOffice.js new file mode 100644 index 0000000..e811f44 --- /dev/null +++ b/local/path/home/Ramses/corp/UpgradeOffice.js @@ -0,0 +1,12 @@ +/** @param {NS} ns */ +export async function main(ns) { + let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"]; + let corpName = "ChemNeo"; + let currentSize = 0; + for (let city of cities) { + currentSize = ns.corporation.getOffice(corpName, city).size; + if (currentSize < 4) { + ns.corporation.upgradeOfficeSize(corpName, city, 4 - currentSize); + }; + } +} \ No newline at end of file diff --git a/local/path/home/Ramses/killAllScript.js b/local/path/home/Ramses/killAllScript.js new file mode 100644 index 0000000..cc54271 --- /dev/null +++ b/local/path/home/Ramses/killAllScript.js @@ -0,0 +1,8 @@ +/** @param {NS} ns */ +export async function main(ns) { + let serverList = JSON.parse(ns.read("serverList.txt")); + for (const [name, entry] of Object.entries(serverList)) { + + ns.killall(name, true) + } +} diff --git a/local/path/home/Ramses/purchaseServers.js b/local/path/home/Ramses/purchaseServers.js new file mode 100644 index 0000000..e79b9cb --- /dev/null +++ b/local/path/home/Ramses/purchaseServers.js @@ -0,0 +1,5 @@ +import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, purchaseAndUpgradeServers } from "/RamsesUtils.js"; +/** @param {NS} ns */ +export async function main(ns) { + await purchaseAndUpgradeServers(ns); +} \ No newline at end of file