Philipp
2024-10-14 22:48:21 +02:00
53 changed files with 250 additions and 1970 deletions

4
.gitignore vendored
View File

@@ -1,3 +1 @@
Mizzajl/home/bestTarget.txt Mizzajl/home/*.txt
Mizzajl/home/ServerRouteList.txt
Mizzajl/home/serverList.txt

View File

@@ -1,6 +1,7 @@
/** @param {NS} ns */ /** @param {NS} ns */
export async function main(ns) { export async function main(ns) {
let aCitites = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"]; let aCitites = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
let sCorpName = "RM Enterprises";
let sDivisionName = "Agri-Ram"; let sDivisionName = "Agri-Ram";
let reset = ns.args[0]; let reset = ns.args[0];
if (reset === true || !ns.fileExists("CorpStatus.txt")) { if (reset === true || !ns.fileExists("CorpStatus.txt")) {
@@ -14,6 +15,14 @@ export async function main(ns) {
let oOfficeData = {}; let oOfficeData = {};
let oWarehouseData = {}; let oWarehouseData = {};
let bUnlockStatus = false; let bUnlockStatus = false;
nListenPID = ns.run("/corp/HasCorp.js");
await ns.nextPortWrite(nListenPID);
let bCorpExists = ns.readPort(nListenPID);
if (!bCorpExists) {
nListenPID = ns.run("/corp/CreateCorp.js", 1, sCorpName);
await ns.nextPortWrite(nListenPID);
}
if (!ns.readPort(nListenPID)) { ns.tprint("Error! Couldn't start Corporation!"); return };
ns.tprint(oCorpStatus); ns.tprint(oCorpStatus);
while (nStep < 199) { while (nStep < 199) {
nStep = oCorpStatus.nStep; nStep = oCorpStatus.nStep;
@@ -75,13 +84,26 @@ export async function main(ns) {
nListenPID = ns.run("/corp/GetOfficeData.js", 1, sDivisionName, sCity); nListenPID = ns.run("/corp/GetOfficeData.js", 1, sDivisionName, sCity);
await ns.nextPortWrite(nListenPID); await ns.nextPortWrite(nListenPID);
oOfficeData = ns.readPort(nListenPID); oOfficeData = ns.readPort(nListenPID);
ns.tprint(oOfficeData);
nListenPID = ns.run("/corp/HireWorkers.js", 1, sDivisionName, sCity, JSON.stringify(oOfficeData)); nListenPID = ns.run("/corp/HireWorkers.js", 1, sDivisionName, sCity, JSON.stringify(oOfficeData));
await ns.nextPortWrite(nListenPID); await ns.nextPortWrite(nListenPID);
} }
oCorpStatus.nStep = 200; oCorpStatus.nStep = 50;
break; break;
case 50: case 50:
let aCurrentGoods = [
{ "sMaterialName": "Food", "yAmount": "MAX", "yPrice": "MP-5" },
{ "sMaterialName": "Plants", "yAmount": "MAX", "yPrice": "MP-5" }
]
for (let sCity of aCitites) {
aCurrentGoods.forEach(async (entry) => {
nListenPID = ns.run("/corp/SetSalePrices.js", 1, sDivisionName, sCity, JSON.stringify(entry));
nListenPID = ns.run("/corp/SetMaterialLimit.js", 1, sDivisionName, sCity, JSON.stringify(entry));
ns.tprint(sCity, entry)
})
}
oCorpStatus.nStep = 200;
break; break;
case 60: case 60:
break; break;

View File

@@ -1 +0,0 @@
{"nStep":0}

View File

@@ -0,0 +1,12 @@
import { distributeScript } from "./Library";
/** @param {NS} ns */
export async function main(ns) {
const sScript = ns.args[0];
const nThreads = ns.args[1];
const sTarget = ns.args[3];
ns.tail();
await distributeScript(ns, sScript, nThreads, sTarget);
}

View File

@@ -71,7 +71,7 @@ export async function main(ns) {
ns.scp(sLibraryScript, entry.serverName, "home"); ns.scp(sLibraryScript, entry.serverName, "home");
ns.scp(sSettingsFile, entry.serverName, "home"); ns.scp(sSettingsFile, entry.serverName, "home");
ns.exec(sScript, entry.serverName, 1, sTarget, nFrequency, true, false); ns.exec(sScript, entry.serverName, 1, sTarget, nFrequency, true, false);
await ns.sleep(1); //await ns.sleep(1);
} }
else if (entry.maxRam < 64 && entry.maxRam > 0 && entry.rootAccess === true) { else if (entry.maxRam < 64 && entry.maxRam > 0 && entry.rootAccess === true) {
ns.scriptKill(sScript, entry.serverName); ns.scriptKill(sScript, entry.serverName);

View File

@@ -88,7 +88,7 @@ export function findBestTarget(ns, maxSec, maxPorts, currentHackLevel, manualTar
bestEntry = null; bestEntry = null;
let nMaxMoneyPerChance = 0; let nMaxMoneyPerChance = 0;
let nBestMoneyPerChance = 0; let nBestMoneyPerChance = 0;
serverList.forEach((entry)=> { serverList.forEach((entry) => {
if (entry.minSec <= maxSec && entry.minPorts <= maxPorts && entry.minHackLvl <= currentHackLevel) { if (entry.minSec <= maxSec && entry.minPorts <= maxPorts && entry.minHackLvl <= currentHackLevel) {
nMaxMoneyPerChance = (entry.maxMoney * ns.hackAnalyzeChance(entry.serverName)) / entry.minSec; nMaxMoneyPerChance = (entry.maxMoney * ns.hackAnalyzeChance(entry.serverName)) / entry.minSec;
if (nMaxMoneyPerChance > nBestMoneyPerChance) { if (nMaxMoneyPerChance > nBestMoneyPerChance) {
@@ -96,7 +96,7 @@ export function findBestTarget(ns, maxSec, maxPorts, currentHackLevel, manualTar
bestEntry = entry.serverName; bestEntry = entry.serverName;
} }
} }
}) })
} }
ns.write("bestTarget.txt", JSON.stringify(serverList.find((entry) => entry.serverName === bestEntry)), "w"); ns.write("bestTarget.txt", JSON.stringify(serverList.find((entry) => entry.serverName === bestEntry)), "w");
} }
@@ -203,20 +203,21 @@ export async function purchaseAndUpgradeServers(ns) {
purchasedServers = listPurchasedServers(ns); purchasedServers = listPurchasedServers(ns);
for (let currentServer of purchasedServers) { for (let currentServer of purchasedServers) {
currentMoney = ns.getServerMoneyAvailable("home"); currentMoney = ns.getServerMoneyAvailable("home");
if (ns.getServerMaxRam(currentServer) < targetRam){ if (ns.getServerMaxRam(currentServer) < targetRam) {
if (ns.getPurchasedServerUpgradeCost(currentServer, targetRam) < currentMoney) { if (ns.getPurchasedServerUpgradeCost(currentServer, targetRam) < currentMoney) {
if (ns.upgradePurchasedServer(currentServer, targetRam)) { if (ns.upgradePurchasedServer(currentServer, targetRam)) {
ns.print(currentServer + " upgraded to " + targetRam + " GB RAM"); ns.print(currentServer + " upgraded to " + targetRam + " GB RAM");
serverList = JSON.parse(ns.read("serverList.txt")); serverList = JSON.parse(ns.read("serverList.txt"));
let newServerList = serverList.map((entry) => { if (entry.serverName === currentServer){ entry.maxRam = targetRam} return entry}); let newServerList = serverList.map((entry) => { if (entry.serverName === currentServer) { entry.maxRam = targetRam } return entry });
ns.write("serverList.txt", JSON.stringify(newServerList), "w"); ns.write("serverList.txt", JSON.stringify(newServerList), "w");
ns.tprint(cCyan + "Server: " + currentServer + " upgraded to: " + targetRam.toLocaleString() + " GB" + cReset); ns.tprint(cCyan + "Server: " + currentServer + " upgraded to: " + targetRam.toLocaleString() + " GB" + cReset);
ns.toast("Server: " + currentServer + " upgraded to: " + targetRam.toLocaleString() + " GB", "info", 10000); ns.toast("Server: " + currentServer + " upgraded to: " + targetRam.toLocaleString() + " GB", "info", 10000);
}
} else {
await ns.sleep(5000);
continue
} }
} else { };
await ns.sleep(5000);
continue
}};
} }
++i; ++i;
} }
@@ -278,8 +279,8 @@ export function listWorkServers(ns) {
if (entry.rootAccess && entry.maxRam >= 1) { if (entry.rootAccess && entry.maxRam >= 1) {
let sServerName = entry.serverName.padEnd(nServerColumnWidth, ' '); let sServerName = entry.serverName.padEnd(nServerColumnWidth, ' ');
let nFreeRAM = entry.maxRam - ns.getServerUsedRam(entry.serverName); let nFreeRAM = entry.maxRam - ns.getServerUsedRam(entry.serverName);
let sFreeRAM = Math.floor(nFreeRAM).toLocaleString("en-US").padStart(nValueColumnWidth, ' '); let sFreeRAM = ns.formatRam(nFreeRAM).padStart(nValueColumnWidth, ' ');
let sMaxRam = entry.maxRam.toLocaleString("en-US").padStart(nValueColumnWidth, ' '); let sMaxRam = ns.formatRam(entry.maxRam).padStart(nValueColumnWidth, ' ');
let sCores = entry.serverCores.toLocaleString("en-US").padStart(nCoresWidth, ' '); let sCores = entry.serverCores.toLocaleString("en-US").padStart(nCoresWidth, ' ');
ns.printRaw(`${sServerName}${sFreeRAM} / ${sMaxRam}${sCores}`); ns.printRaw(`${sServerName}${sFreeRAM} / ${sMaxRam}${sCores}`);
nTotalWorkerRAM += entry.maxRam; nTotalWorkerRAM += entry.maxRam;
@@ -291,6 +292,10 @@ export function listWorkServers(ns) {
} }
export function sortJsonArrayByKey(array, primaryKey, secondaryKey) { export function sortJsonArrayByKey(array, primaryKey, secondaryKey) {
if (typeof (array) === "string") {
array = JSON.parse(array);
}
return array.sort((a, b) => { return array.sort((a, b) => {
const xPrimary = a[primaryKey]; const xPrimary = a[primaryKey];
const yPrimary = b[primaryKey]; const yPrimary = b[primaryKey];
@@ -328,3 +333,43 @@ export function sortJsonArrayByKey(array, primaryKey, secondaryKey) {
} }
} }
/** @param {NS} ns */
export async function distributeScript(ns, sScript, nThreads, ...args) {
ns.tail();
ns.disableLog("ALL");
const sListName = "serverList.txt";
if (!ns.fileExists(sListName, "home")) { ns.print(`ERROR ${sListName} does not exist.`); return false; };
let sServerList = ns.read(sListName);
let aSortedList = sortJsonArrayByKey(sServerList, "serverCores", "maxRam").reverse();
ns.print(sScript);
ns.print(nThreads);
ns.print(...args);
const nScriptSize = ns.getScriptRam(sScript, "home");
let nTotalSize = nScriptSize * nThreads;
ns.print("nScriptSize = " + nScriptSize);
ns.print("nTotalSize = " + nTotalSize);
aSortedList.forEach((entry) => {
let sHost = entry.serverName;
ns.print("sHost = " + sHost)
ns.scp(sScript, sHost);
let nFreeRAM = ns.getServerMaxRam(entry.serverName) - ns.getServerUsedRam(entry.serverName);
if (nThreads >= 1 && entry.rootAccess && nFreeRAM >= nTotalSize) {
ns.exec(sScript, sHost, nThreads, ...args);
nThreads = 0;
nFreeRAM = ns.getServerMaxRam(entry.serverName) - ns.getServerUsedRam(entry.serverName);
}
else if (nThreads >= 1 && entry.rootAccess && nFreeRAM >= nScriptSize) {
let nThreadsDist = Math.floor(nFreeRAM / nScriptSize);
ns.print("room for : " + nThreadsDist + " scripts");
ns.exec(sScript, sHost, nThreadsDist, ...args);
nThreads -= nThreadsDist;
nFreeRAM = ns.getServerMaxRam(entry.serverName) - ns.getServerUsedRam(entry.serverName);
}
});
}

View File

@@ -1,97 +0,0 @@
/** @param {NS} ns */
export async function main(ns, allServers) {
ns.tail();
await scanRecursiveWrapper(ns);
let currentHackingLevel = ns.getHackingLevel();
let currentArray = [];
let currentHop = "";
let serverRoutes = JSON.parse(ns.read("ServerRouteList.txt"));
let allPaths = getPaths(serverRoutes);
let checkAll = ns.args[0];
for (const entry of allPaths) {
for (const name of entry) {
if (ns.singularity.connect(name) === false) {
ns.tprint("Error when trying to connect to: " + currentHop);
return
}
if (ns.getServer(name).hostname === "CSEC" || ns.getServer(name).hostname === "avmnite-02h" || ns.getServer(name).hostname === "I.I.I.I" || ns.getServer(name).hostname === "run4theh111z" || ns.getServer(name).hostname === "The-Cave" || checkAll === true ) {
if (!ns.getServer(name).backdoorInstalled) {
if (ns.getServerRequiredHackingLevel(name) < currentHackingLevel && ns.hasRootAccess(name) === true) {
ns.print("Trying to backdoor " + name)
await ns.singularity.installBackdoor(name);
ns.print("Success on " + name)
}
} else { continue }
}
}
}
ns.singularity.connect("home");
}
function getPaths(obj, path = []) {
const paths = [];
for (const key in obj) {
const newPath = [...path, key];
paths.push(newPath);
if (typeof obj[key] === 'object' && obj[key] !== null) {
paths.push(...getPaths(obj[key], newPath));
}
}
return paths;
}
/** @param {NS} ns */
async function scanRecursiveWrapper(ns) {
ns.rm("ServerRouteList.txt");
const home = "home";
let serverRouteList = { home: {} };
let knownServers = [];
let unscanned = [];
unscanned.push(home);
knownServers.push(home);
while (unscanned.length > 0) {
let currentServer = unscanned.pop();
let currentChildren = ns.scan(currentServer).filter(element => !knownServers.includes(element));
knownServers = knownServers.concat(currentChildren);
let keyPath = findKeyPath(serverRouteList, currentServer);
let childrenObject = currentChildren.reduce((a, v) => ({ ...a, [v]: {} }), {});
writeValueToPath(serverRouteList, keyPath, childrenObject);
for (let i = 0; i < currentChildren.length; i++) {
let child = currentChildren[i];
unscanned.push(child);
}
}
ns.write("ServerRouteList.txt", JSON.stringify(serverRouteList), "w");
}
function findKeyPath(json, key) {
if (typeof json !== 'object' || json === null) {
return null;
}
if (key in json) {
return key;
}
for (const property in json) {
if (json.hasOwnProperty(property)) {
const path = findKeyPath(json[property], key);
if (path !== null) {
return property + '*' + path;
}
}
}
return null;
}
function writeValueToPath(json, path, value) {
const parts = path.split('*');
let currentObject = json;
for (let i = 0; i < parts.length - 1; i++) {
const part = parts[i];
if (currentObject[part] === undefined) {
currentObject[part] = {};
}
currentObject = currentObject[part];
}
currentObject[parts[parts.length - 1]] = value;
}

View File

@@ -1,161 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
ns.tprint(ns.codingcontract.getContractTypes())
let testcontract = ns.codingcontract.createDummyContract("Total Ways to Sum")
let contractType = ns.codingcontract.getContractType(testcontract);
ns.tprint(ns.codingcontract.getDescription(testcontract))
let n = ns.codingcontract.getData(testcontract);
ns.tprint("Data: " + n);
let answer = "";
if (contractType === "Find Largest Prime Factor") {
answer = largestPrimeFactor(n);
}
if (contractType === "Subarray with Maximum Sum") {
answer = SubarrayWithMaximumSum(ns, n)
}
if (contractType === "Total Ways to Sum") {
answer = TotalWaysToSum(ns, n)
}
ns.tprint(answer);
ns.tprint(ns.codingcontract.attempt(answer, testcontract));
}
/*
5:
4 1
3 2
3 1 1
2 2 1
2 1 1 1
1 1 1 1 1
6:
5 1
4 2
4 1 1
3 3
3 2 1
3 1 1 1
2 2 2
2 2 1 1
2 1 1 1 1
1 1 1 1 1 1
# Start with one position m filling it with the integers between 1 and target
# For each m, fill the next position n with integers between 1 and m
# Repeat as long as the sum is smaller than target.
# append all iterations to the Array and count
*/
function TotalWaysToSum(ns, target) {
let sumArray = [];
let inputArray = [];
let unfinishedArray = [];
let rollingSum = 0;
for (let i = 1; i < target; i++) {
inputArray.push([i]);
}
let z = 1
while (inputArray.length > 0) {
z++
inputArray.forEach((element) => {
rollingSum = element.reduce((a, b) => a + b, 0);
if (rollingSum === target) {
sumArray.push(element)
} else {
for (let k = 1; k <= element[element.length-1] && k <= target - rollingSum; k++) {
unfinishedArray.push(element.concat([k]))
}
}
}
)
inputArray = unfinishedArray;
}
ns.tprint("Target: " +target)
ns.tprint("Length: " + sumArray.length)
return sumArray.length
}
function SubarrayWithMaximumSum(ns, givenArray) {
let arrayLength = givenArray.length;
let maxSum = -10000;
let runningSum = 0;
for (let i = 1; i <= arrayLength; i++) {
for (let j = 0; j <= arrayLength - i; j++) {
runningSum = eval(givenArray.slice(j, i + j).join('+'));
//ns.tprint("i: "+i+ " j: "+ j + " Array: "+givenArray.slice(j,i+j)+ " eval: "+ givenArray.slice(j,i+j).join('+')+"runningSum: "+runningSum);
if (maxSum < runningSum) { maxSum = runningSum };
}
}
return maxSum
}
function FindLargestPrimeFactor(number) {
let factor = 2;
while (factor * factor <= number) {
if (number % factor === 0) {
number /= factor;
} else {
factor++
}
}
return number;
}
/*
function FindLargestPrimeFactor(n) {
let x = Math.ceil(Math.random()*10);
let y = x;
let d = 1;
while (d === 1) {
x = g(x, n);
y = g(g(y, n), n)
d = gcd(n, Math.abs(x - y))
//ns.tprint("x:" + x + " y: " + y + " d: " + d)
}
if (d === n) {
return ("failure")
}
else {
return (d)
}
}
function g(x, n) {
return (x * x) % n
}
function gcd(a,b) {
a = Math.abs(a);
b = Math.abs(b);
if (b > a) {var temp = a; a = b; b = temp;}
while (true) {
if (b == 0) return a;
a %= b;
if (a == 0) return b;
b %= a;
}
}
function gcd(a, b) {
if (!b) {
return a;
}
return gcd(b, a % b);
}
*/

View File

@@ -1,161 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
ns.tprint(ns.codingcontract.getContractTypes())
let testcontract = ns.codingcontract.createDummyContract("Total Ways to Sum")
let contractType = ns.codingcontract.getContractType(testcontract);
ns.tprint(ns.codingcontract.getDescription(testcontract))
let n = ns.codingcontract.getData(testcontract);
ns.tprint("Data: " + n);
let answer = "";
if (contractType === "Find Largest Prime Factor") {
answer = largestPrimeFactor(n);
}
if (contractType === "Subarray with Maximum Sum") {
answer = SubarrayWithMaximumSum(ns, n)
}
if (contractType === "Total Ways to Sum") {
answer = TotalWaysToSum(ns, n)
}
ns.tprint(answer);
ns.tprint(ns.codingcontract.attempt(answer, testcontract));
}
/*
5:
4 1
3 2
3 1 1
2 2 1
2 1 1 1
1 1 1 1 1
6:
5 1
4 2
4 1 1
3 3
3 2 1
3 1 1 1
2 2 2
2 2 1 1
2 1 1 1 1
1 1 1 1 1 1
# Start with one position m filling it with the integers between 1 and target
# For each m, fill the next position n with integers between 1 and m
# Repeat as long as the sum is smaller than target.
# append all iterations to the Array and count
*/
function TotalWaysToSum(ns, target) {
let sumArray = [];
let inputArray = [];
let unfinishedArray = [];
let rollingSum = 0;
for (let i = 1; i < target; i++) {
inputArray.push([i]);
}
let z = 1
while (inputArray.length > 0) {
z++
inputArray.forEach((element) => {
rollingSum = element.reduce((a, b) => a + b, 0);
if (rollingSum === target) {
sumArray.push(element)
} else {
for (let k = 1; k <= element[element.length-1] && k <= target - rollingSum; k++) {
unfinishedArray.push(element.concat([k]))
}
}
}
)
inputArray = unfinishedArray;
}
ns.tprint("Target: " +target)
ns.tprint("Length: " + sumArray.length)
return sumArray.length
}
function SubarrayWithMaximumSum(ns, givenArray) {
let arrayLength = givenArray.length;
let maxSum = -10000;
let runningSum = 0;
for (let i = 1; i <= arrayLength; i++) {
for (let j = 0; j <= arrayLength - i; j++) {
runningSum = eval(givenArray.slice(j, i + j).join('+'));
//ns.tprint("i: "+i+ " j: "+ j + " Array: "+givenArray.slice(j,i+j)+ " eval: "+ givenArray.slice(j,i+j).join('+')+"runningSum: "+runningSum);
if (maxSum < runningSum) { maxSum = runningSum };
}
}
return maxSum
}
function FindLargestPrimeFactor(number) {
let factor = 2;
while (factor * factor <= number) {
if (number % factor === 0) {
number /= factor;
} else {
factor++
}
}
return number;
}
/*
function FindLargestPrimeFactor(n) {
let x = Math.ceil(Math.random()*10);
let y = x;
let d = 1;
while (d === 1) {
x = g(x, n);
y = g(g(y, n), n)
d = gcd(n, Math.abs(x - y))
//ns.tprint("x:" + x + " y: " + y + " d: " + d)
}
if (d === n) {
return ("failure")
}
else {
return (d)
}
}
function g(x, n) {
return (x * x) % n
}
function gcd(a,b) {
a = Math.abs(a);
b = Math.abs(b);
if (b > a) {var temp = a; a = b; b = temp;}
while (true) {
if (b == 0) return a;
a %= b;
if (a == 0) return b;
b %= a;
}
}
function gcd(a, b) {
if (!b) {
return a;
}
return gcd(b, a % b);
}
*/

View File

@@ -1,14 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
let cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
let corpName = "AgraNeo";
//ns.tprint(ns.corporation.getConstants())
//ns.corporation.getMaterial();
//ns.corporation.buyMaterial();
for (let city of cities) {
await ns.run("/corp/Smart.js",1,corpName,city);
await ns.tprint(ns.run("/corp/UpgradeOffice.js",1,corpName,city));
await ns.sleep(1000)
}
}

View File

@@ -1,8 +0,0 @@
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript } from "/RamsesUtils.js";
/** @param {NS} ns */
export async function main(ns) {
let cracks = {};
cracks = getCracks(ns);
crackingAndRooting(ns, cracks, "", false);
}

View File

@@ -1,278 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
ns.tprint("This is just a function library, it doesn't do anything.");
}
/** @param {NS} ns */
export function getCracks(ns) {
let cracks = {};
if (ns.fileExists("BruteSSH.exe", "home")) {
cracks["BruteSSH.exe"] = ns.brutessh;
};
if (ns.fileExists("FTPCrack.exe", "home")) {
cracks["FTPCrack.exe"] = ns.ftpcrack;
};
if (ns.fileExists("relaySMTP.exe", "home")) {
cracks["relaySMTP.exe"] = ns.relaysmtp;
};
if (ns.fileExists("HTTPWorm.exe", "home")) {
cracks["HTTPWorm.exe"] = ns.httpworm;
};
if (ns.fileExists("SQLInject.exe", "home")) {
cracks["SQLInject.exe"] = ns.sqlinject;
};
return cracks;
}
/** @param {NS} ns */
export function scanServerList(ns) {
const home = "home";
let serverList = {};
let unscanned = [];
unscanned.push(home);
while (unscanned.length > 0) {
let currentServer = unscanned.pop();
if (!serverList[currentServer]) {
let maxRam = ns.getServerMaxRam(currentServer);
let minPorts = ns.getServerNumPortsRequired(currentServer);
let minSecLevel = ns.getServerMinSecurityLevel(currentServer);
let minHackLevel = ns.getServerRequiredHackingLevel(currentServer);
let rootAccess = ns.hasRootAccess(currentServer);
let serverMoney = ns.getServerMaxMoney(currentServer);
let serverFiles = ns.ls(currentServer);
let skillFactor = (2.5 * minHackLevel * minSecLevel + 500) / (ns.getHackingLevel() + 50);
let compareTimeFactor = serverMoney / skillFactor / 10e7;
serverList[currentServer] =
{
serverName: currentServer,
maxRam: maxRam,
maxMoney: serverMoney,
minSec: minSecLevel,
minPorts: minPorts,
minHackLvl: minHackLevel,
rootAccess: rootAccess,
factorMoneyPerTime: compareTimeFactor,
openPorts: 0,
serverFiles: serverFiles,
};
let neighbours = ns.scan(currentServer);
for (let i = 0; i < neighbours.length; i++) {
let neighbour = neighbours[i];
if (serverList[neighbour]) {
continue
}
unscanned.push(neighbour);
}
}
}
ns.write("serverList.txt", JSON.stringify(serverList), "w");
}
/** @param {NS} ns */
export function findBestTarget(ns, maxSec, maxPorts, currentHackLevel, manualTargetOverride) {
if (!ns.fileExists("serverList.txt", "home")) scanServerList();
let serverList = JSON.parse(ns.read("serverList.txt"));
let bestEntry = null;
let compareTime = 0;
for (const [name, entry] of Object.entries(serverList)) {
if (entry.minSec <= maxSec && entry.minPorts <= maxPorts && entry.minHackLvl < currentHackLevel) {
if (entry.factorMoneyPerTime > compareTime) {
compareTime = entry.factorMoneyPerTime;
bestEntry = name;
}
}
}
if (manualTargetOverride.length > 0) {
bestEntry = manualTargetOverride;
}
ns.write("bestTarget.txt", JSON.stringify(serverList[bestEntry]), "w");
}
/** @param {NS} ns */
export function crackingAndRooting(ns, cracks, funnyScript, copy) {
if (!ns.fileExists("serverList.txt", "home")) scanServerList();
let serverList = JSON.parse(ns.read("serverList.txt"));
for (const [name, entry] of Object.entries(serverList)) {
let cracked = false;
let openPorts = serverList[name].openPorts || 0;
if (entry.minPorts === 0 || (entry.minPorts > openPorts && entry.minPorts <= Object.keys(cracks).length)) {
for (let k = 0; k < entry.minPorts; k++) {
cracks[Object.keys(cracks)[k]](name);
serverList[name].openPorts = k;
}
cracked = true;
}
if (!ns.hasRootAccess(name) && cracked === true) {
ns.nuke(name);
if (ns.hasRootAccess(name)) {
serverList[name].rootAccess = true;
if (serverList[name].maxRam > 0 && copy === true) {
copyAndRunScript(ns, funnyScript, name);
}
}
}
ns.write("serverList.txt", JSON.stringify(serverList), "w");
}
ns.tprint("Cracking and rooting done");
}
/** @param {NS} ns */
export function copyAndRunScript(ns, funnyScript, currentServer) {
// change to run for one specific server with bestTarget from file
//let minRam = ns.getScriptRam(funnyScript);
let bestTarget = JSON.parse(ns.read("bestTarget.txt"));
let name = currentServer;
let serverList = JSON.parse(ns.read("serverList.txt"));
ns.print(name);
if (serverList[name].rootAccess === true && serverList[bestTarget.serverName].rootAccess === true) {
if (name !== "home") {
ns.print("killed threads on: " + name + ns.killall(name, true));
} else {
ns.print("killed threads on: " + name + ns.scriptKill(funnyScript[0], name));
};
//move script and run
if (serverList[name].maxRam > 0) {
ns.scp(funnyScript, name, "home");
let maxProcesses = 1;
if (serverList[name].maxRam >= 8) {
maxProcesses = Math.max(Math.floor((serverList[name].maxRam) / 8), 1);
} else {
maxProcesses = 1
};
for (let n = 1; n <= maxProcesses; n++) {
ns.exec(funnyScript[0], name, 1, bestTarget.serverName);
}
/*let maxThreads = 0;
if (name === "home") {
maxThreads = Math.floor((serverList[name].maxRam - ns.getServerUsedRam(name) - 32) / minRam);
ns.print(name + " " + maxThreads);
} else {
ns.print(name);
maxThreads = Math.floor(serverList[name].maxRam / minRam);
ns.print(name + " " + maxThreads);
};
while (maxThreads > 0) {
let threadsToAssign = maxThreads < 500 ? maxThreads : 500;
if (ns.exec(funnyScript, name, threadsToAssign, bestTarget.serverName, serverList[bestTarget.serverName].minSec, serverList[bestTarget.serverName].maxMoney, JSON.stringify(serverList[bestTarget.serverName])) !== 0) {
ns.print("Executing script on: " + name + " with: " + threadsToAssign + " threads out of " + maxThreads + " total threads");
maxThreads = maxThreads - threadsToAssign;
} else {
ns.tprint("Error running script on: " + name);
maxThreads = -1;
};
}*/
}
}
}
/** @param {NS} ns */
export async function purchaseAndUpgradeServers(ns) {
ns.disableLog("sleep");
ns.disableLog("getServerMoneyAvailable");
ns.disableLog("getServerMaxRam");
let maxPurchasedServers = ns.getPurchasedServerLimit();
let purchasedServers = [];
let count = listPurchasedServers(ns).length;
let currentMoney = 0;
let serverList = {};
while (count < maxPurchasedServers) {
purchasedServers = listPurchasedServers(ns);
currentMoney = ns.getServerMoneyAvailable("home");
let targetRamInitial = 16;
if (ns.getPurchasedServerCost(targetRamInitial) < currentMoney) {
let hostname = ns.purchaseServer("pserv-" + purchasedServers.length, 16);
count = listPurchasedServers(ns).length;
serverList = JSON.parse(ns.read("serverList.txt"));
serverList[hostname] = {
serverName: hostname,
maxRam: 16,
maxMoney: 0,
minSec: 0,
minPorts: 5,
minHackLvl: 1,
rootAccess: true,
factorMoneyPerTime: 99999999,
openPorts: 0,
};
ns.write("serverList.txt", JSON.stringify(serverList), "w");
continue
} else {
await ns.sleep(5000);
}
}
let i = 5;
while (i < 21) {
let targetRam = 2 ** i;
purchasedServers = listPurchasedServers(ns);
for (let currentServer of purchasedServers) {
currentMoney = ns.getServerMoneyAvailable("home");
if (ns.getServerMaxRam(currentServer) < targetRam && ns.getPurchasedServerUpgradeCost(currentServer, targetRam) < currentMoney) {
if (ns.upgradePurchasedServer(currentServer, targetRam)) {
ns.print(currentServer + " upgraded to " + targetRam + " GB RAM");
serverList = JSON.parse(ns.read("serverList.txt"));
serverList[currentServer].maxRam = targetRam;
ns.write("serverList.txt", JSON.stringify(serverList), "w");
}
} else {
await ns.sleep(5000);
continue
};
}
++i;
}
ns.tprint("Extiting purchaseServers script!")
}
/** @param {NS} ns */
function listPurchasedServers(ns) {
return ns.getPurchasedServers();
}
/** @param {NS} ns */
export async function runControllerOnPserv(ns) {
let purchasedServers = listPurchasedServers(ns);
let nPID = 0;
nPID = ns.exec("S2controller.js", "home");
ns.tprint("Started S2controller.js on " + "home" + " with PID " + nPID)
for (let currentServer of purchasedServers) {
ns.scp(["S2tGrow.js", "S2tWeaken.js", "S2tHack.js", "S2controller.js", "S2utils.js"], currentServer, "home");
nPID = ns.exec("S2controller.js", currentServer);
if (nPID > 0) {
ns.tprint("Started S2controller.js on " + currentServer + " with PID " + nPID)
}
}
}
/** @param {NS} ns */
export async function backdoor(ns) {
let serverList = JSON.parse(ns.read("serverList.txt"));
let lasthackingLevel = 0;
let currentHackingLevel = 0;
while (true) {
currentHackingLevel = ns.getHackingLevel();
if (currentHackingLevel > lasthackingLevel) {
lasthackingLevel = currentHackingLevel;
for (const [name, entry] of Object.entries(serverList)) {
if (entry.minHackLvl <= lasthackingLevel && entry.hasBackdoor !== true) {
ns.singularity.connect(name);
await ns.singularity.installBackdoor();
ns.singularity.connect("home");
serverList[name].hasBackdoor = true;
ns.tprint("Backdoor on: " + name);
}
}
ns.write("serverList.txt", JSON.stringify(serverList), "w");
} else {
await ns.sleep(30000)
};
}
}

View File

@@ -1,491 +0,0 @@
/*
Welcome to part 4. A continuous batcher is a major hurdle compared to everything we've done so far. The number
and complexity of the challenges increases drastically when trying to keep everything running indefinitely.
With luck, the overengineering we've done so far will have well prepared us for the challenges of a periodic
batcher.
Technically, I use quite a few JIT techniques in this batcher, but I don't consider it a true JIT batcher
as it doesn't take full advantage of the potential RAM efficiency. Instead, I favor simpler logic, while still
allowing the batcher to make certain adjustments if it needs to.
When it comes to continuous batchers, performance is king. We're going to aim for 5ms spacing as we have
throughout this guide so far, but there's a lot we need to do in those 5ms. As such, we need to make sure that
we choose which operations to do carefully, as well as when to do them and how to make sure they are as fast
as we can make them.
*/
// One new utility. A custom data structure for managing our schedule. You can see the details in utils.js
import { getServers, copyScripts, checkTarget, isPrepped, prep, Deque } from "/S4utils.js";
const TYPES = ["hack", "weaken1", "grow", "weaken2"];
const WORKERS = ["S4tHack.js", "S4tWeaken.js", "S4tGrow.js"];
const SCRIPTS = { hack: "S4tHack.js", weaken1: "S4tWeaken.js", grow: "S4tGrow.js", weaken2: "S4tWeaken.js" };
const COSTS = { hack: 1.7, weaken1: 1.75, grow: 1.75, weaken2: 1.75 };
// const OFFSETS = { hack: 0, weaken1: 1, grow: 2, weaken2: 3 };
// A new optional constant. The RamNet will attempt to reserve this much ram at home.
// You can set it to 0 if you don't want to reserve anything, and setting it too high will just reserve as much as possible.
const RESERVED_HOME_RAM = 0;
// A brand new class to help keep our increasingly complex logic organized.
// Runs a continuous (periodic) HWGW batcher: keeps the job queue topped up, deploys jobs,
// and schedules a replacement batch every time a weaken2 reports completion.
class ContinuousBatcher {
	#ns; // The ns object. Stored as a class variable to save me the trouble of passing it all the time.
	// The usual data we've grown familiar with by now.
	#metrics;
	#ramNet;
	#target;
	#schedule;
	#dataPort;
	#batchCount = 0;
	#desyncs = 0; // This is mostly used for logging purposes, since the batcher is self-correcting.
	// A capital M Map. We'll use this to keep track of active jobs by id (type + batch number).
	#running = new Map();
	/**
	 * @param {NS} ns
	 * @param {Metrics} metrics - Precalculated timing/thread/greed data for the target.
	 * @param {RamNet} ramNet - Pool of free RAM blocks that jobs are assigned from.
	 */
	constructor(ns, metrics, ramNet) {
		this.#ns = ns;
		this.#metrics = metrics;
		this.#ramNet = ramNet;
		this.#target = metrics.target;
		this.#dataPort = ns.getPortHandle(ns.pid);
		// Seeding the first ending time.
		this.#metrics.end = Date.now() + metrics.wTime - metrics.spacer;
		// The new schedule I promised. It's a double-ended queue, but we'll mostly just be using it as a normal queue.
		// It has a static size, so we make sure it can accommodate all of our jobs (4 per batch).
		this.#schedule = new Deque(metrics.depth * 4);
	}
	// This is a function that can schedule a given number of batches.
	// With no arguments, it just fills up the queue.
	scheduleBatches(batches = this.#metrics.depth) {
		while (this.#schedule.size < batches * 4) {
			++this.#batchCount;
			for (const type of TYPES) {
				this.#metrics.end += this.#metrics.spacer;
				const job = new Job(type, this.#metrics, this.#batchCount);
				/*
				We don't actually error out if a job can't be assigned anymore. Instead, we just assign as much
				as we can. If it desyncs, the logic will correct it, and if a weaken2 gets cancelled then the actual
				depth will naturally decrease below the target depth. Not a perfect fix, but better than breaking.
				*/
				if (!this.#ramNet.assign(job)) {
					this.#ns.tprint(`WARN: Insufficient RAM to assign ${job.type}: ${job.batch}.`);
					continue;
				}
				this.#schedule.push(job);
			}
		}
	}
	// The function for deploying jobs. Very similar to the code from our shotgun batcher with some minor changes.
	async deploy() {
		// The for loop is replaced by a while loop, since our Deque isn't iterable.
		while (!this.#schedule.isEmpty()) {
			const job = this.#schedule.shift();
			job.end += this.#metrics.delay;
			const jobPid = this.#ns.exec(SCRIPTS[job.type], job.server, { threads: job.threads, temporary: true }, JSON.stringify(job));
			if (!jobPid) throw new Error(`Unable to deploy ${job.type}`);
			const tPort = this.#ns.getPortHandle(jobPid);
			// We save the pid for later (so a desynced hack can be killed).
			job.pid = jobPid;
			// The worker writes its measured startup delay to its own pid-keyed port; wait for it.
			await tPort.nextWrite();
			// Jobs can be late as long as the delay won't cause collisions.
			this.#metrics.delay += Math.max(Math.ceil(tPort.read()) - this.#metrics.spacer, 0);
			this.#running.set(job.id, job);
		}
		// After the loop, we adjust future job ends to account for the delay, then discard it.
		this.#metrics.end += this.#metrics.delay;
		this.#metrics.delay = 0;
	}
	// Our old timeout function is now a proper function of its own. A few extra baubles in the log, but nothing exciting.
	/** @param {NS} ns */
	log() {
		const ns = this.#ns;
		const metrics = this.#metrics;
		const ramNet = this.#ramNet;
		ns.clearLog();
		ns.print(`Hacking ~\$${ns.formatNumber(metrics.maxMoney * metrics.greed * metrics.chance / (4 * metrics.spacer) * 1000)}/s from ${metrics.target}`);
		ns.print(`Status: ${isPrepped(ns, this.#target) ? "Prepped" : "Desynced"}`);
		// Fixed: show the security surplus above the minimum (the operands were swapped, printing a negative value).
		ns.print(`Security: +${metrics.sec - metrics.minSec}`);
		ns.print(`Money: \$${ns.formatNumber(metrics.money, 2)}/${ns.formatNumber(metrics.maxMoney, 2)}`);
		ns.print(`Greed: ${Math.floor(metrics.greed * 1000) / 10}%`);
		ns.print(`Ram available: ${ns.formatRam(ramNet.totalRam)}/${ns.formatRam(ramNet.maxRam)}`);
		ns.print(`Active jobs: ${this.#running.size}/${metrics.depth * 4}`);
		// You'll see what this line's about in a moment.
		if (this.#desyncs) ns.print(`Hacks cancelled by desync: ${this.#desyncs}`);
	}
	// The core loop of our batcher logic. Quite lean with everything neatly divided into functions, but there's still
	// plenty going on here.
	async run() {
		// First we do some initial setup, this is essentially firing off a shotgun blast to get us started.
		const dataPort = this.#dataPort;
		this.scheduleBatches();
		await this.deploy();
		await this.#ns.sleep(0); // This is probably pointless. I forget why I put it here.
		this.log();
		while (true) {
			// Wait for the nextWrite, as usual.
			await dataPort.nextWrite();
			// Sometimes there's a delay and more than one job writes to the port at once.
			// We make sure to handle it all before we move on.
			while (!dataPort.empty()) {
				// Workers now report unique identifiers (type + batchnumber) used to find them on the map.
				const data = dataPort.read();
				// Free up the ram, then remove the job from the active list.
				// The check handles a corner case where a hack gets "cancelled" after it's already finished.
				if (this.#running.has(data)) {
					this.#ramNet.finish(this.#running.get(data));
					this.#running.delete(data);
				}
				// If it's a W2, we've got an opening to do some work.
				if (data.startsWith("weaken2")) {
					// Recalculate times. Threads too, but only if prepped (the logic is in the function itself).
					this.#metrics.calculate(this.#ns);
					/*
					This is probably the most JIT-like aspect of the entire batcher. If the server isn't prepped, then
					we cancel the next hack to let the server fix itself. Between this and the extra 1% grow threads, level
					ups are completely handled. Rapid level ups can lead to a lot of lost jobs, but eventually the program
					stabilizes.
					There are probably more efficient ways to do this. Heck, even this solution could be optimized better,
					but for now, this is an adequate demonstration of a reasonable non-formulas solution to the level up
					problem. It also lets us dip our toes into JIT logic in preparation for the final part.
					*/
					if (!isPrepped(this.#ns, this.#target)) {
						// data looks like "weaken2<batch>"; cancel the hack of the NEXT batch.
						const id = "hack" + (parseInt(data.slice(7)) + 1);
						const cancel = this.#running.get(id);
						// Just in case the hack was already aborted somehow.
						if (cancel) {
							this.#ramNet.finish(cancel);
							this.#ns.kill(cancel.pid);
							this.#running.delete(id);
							++this.#desyncs; // Just to keep track of how much we've lost keeping things prepped.
						}
					}
					// Then of course we just schedule and deploy a new batch.
					this.scheduleBatches(1);
					await this.deploy();
					this.log();
				}
			}
		}
	}
}
/*
Our poor "main" function isn't much more than a kickstart for our new batcher object. It's a bit weird having
it wedged between objects like this, but I wanted to have the new functionality up at the top since most of the
remaining code hasn't changed much. I'll comment the changes anyway.
*/
/** @param {NS} ns */
export async function main(ns) {
	ns.disableLog("ALL");
	ns.tail();
	/*
	This commented out code is for a debugging tool that centralizes logs from the worker scripts into one place.
	Its main advantage is the ability to write txt logs to file, which can be perused later to track down errors.
	You can uncomment it if you'd like to see a live stream of workers finishing without flooding the terminal.
	If you do, make sure to search the file for -LOGGING and uncomment all relevant lines.
	*/
	// if (ns.isRunning("S4logHelper.js", "home")) ns.kill("S4logHelper.js", "home");
	// const logPort = ns.exec("S4logHelper.js", "home");
	// ns.atExit(() => ns.closeTail(logPort));
	// Setup is mostly the same. The controller listens on a port keyed to its own pid.
	const dataPort = ns.getPortHandle(ns.pid);
	dataPort.clear();
	let target = ns.args[0] ? ns.args[0] : "n00dles";
	while (true) {
		// Walk the network; as a side effect, pick the best target (unless one was given) and push worker scripts out.
		const servers = getServers(ns, (server) => {
			if (!ns.args[0]) target = checkTarget(ns, server, target, ns.fileExists("Formulas.exe", "home"));
			copyScripts(ns, server, WORKERS, true);
			return ns.hasRootAccess(server);
		});
		const ramNet = new RamNet(ns, servers);
		const metrics = new Metrics(ns, target);
		// metrics.log = logPort; // Uncomment for -LOGGING.
		if (!isPrepped(ns, target)) await prep(ns, metrics, ramNet);
		ns.clearLog();
		ns.print("Optimizing. This may take a few seconds...");
		// Optimizer has changed again. Back to being synchronous, since the performance is much better.
		optimizePeriodic(ns, metrics, ramNet);
		metrics.calculate(ns);
		// Create and run our batcher. run() loops forever under normal operation.
		const batcher = new ContinuousBatcher(ns, metrics, ramNet);
		await batcher.run();
		/*
		You might be wondering why I put this in a while loop and then just return here. The simple answer is that
		it's because this is meant to be run in a loop, but I didn't implement the logic for it. This version of the
		batcher is completely static once created. It sticks to a single greed value, and doesn't update if more
		RAM becomes available.
		In a future version, you'd want some logic to allow the batcher to choose new targets, update its available RAM,
		and create new batchers during runtime. For now, that's outside the scope of this guide, but consider this loop
		as a sign of what could be.
		*/
		return;
	}
}
// A lightweight data container describing one worker task (hack/weaken1/grow/weaken2).
// Everything the worker needs is copied OUT of the shared metrics snapshot here, so later
// metric recalculations can't retroactively change a job that's already queued.
class Job {
	constructor(type, metrics, batch) {
		const { end, target, port } = metrics;
		this.type = type;
		this.end = end;
		this.time = metrics.times[type];
		this.target = target;
		this.threads = metrics.threads[type];
		this.cost = this.threads * COSTS[type];
		this.server = "none"; // Host is filled in when RamNet assigns the job.
		this.report = true;
		this.port = port;
		this.batch = batch;
		// The status and id are used for interacting with the Deque and Map in our batcher class.
		this.status = "active";
		this.id = type + batch;
	}
}
// The only change to the metrics class is the calculate function skipping threadcounts if the server isn't prepped.
/** @param {NS} ns */
class Metrics {
	constructor(ns, server) {
		this.target = server;
		this.maxMoney = ns.getServerMaxMoney(server);
		this.money = Math.max(ns.getServerMoneyAvailable(server), 1); // Clamped to 1 to avoid divide-by-zero in ratios.
		this.minSec = ns.getServerMinSecurityLevel(server);
		this.sec = ns.getServerSecurityLevel(server);
		this.prepped = isPrepped(ns, server);
		this.chance = 0; // Hack success chance, filled in by calculate().
		this.wTime = 0; // Weaken time; the longest job and the basis for all other timings.
		this.delay = 0; // Accumulated deployment lateness, consumed by the batcher's deploy().
		this.spacer = 5; // Milliseconds between job landings. May be raised by the optimizer.
		this.greed = 0.01; // Fraction of max money hacked per batch. Set by the optimizer.
		this.depth = 0; // The number of concurrent batches to run. Set by the optimizer.
		this.times = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.end = 0; // Slight change for the new timing. The old way in commented out in case I switch back later.
		// this.ends = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.threads = { hack: 0, weaken1: 0, grow: 0, weaken2: 0 };
		this.port = ns.pid; // Workers report completion to the controller's pid-keyed port.
	}
	// Refresh times (always) and thread counts (only while prepped, since the thread math assumes min security).
	calculate(ns, greed = this.greed) {
		const server = this.target;
		const maxMoney = this.maxMoney;
		this.money = ns.getServerMoneyAvailable(server);
		this.sec = ns.getServerSecurityLevel(server);
		this.wTime = ns.getWeakenTime(server);
		this.times.weaken1 = this.wTime;
		this.times.weaken2 = this.wTime;
		this.times.hack = this.wTime / 4;
		this.times.grow = this.wTime * 0.8;
		// this.depth = this.wTime / this.spacer * 4;
		if (isPrepped(ns, server)) { // The only change.
			const hPercent = ns.hackAnalyze(server);
			const amount = maxMoney * greed;
			const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(server, amount)), 1);
			const tGreed = hPercent * hThreads;
			// 1% extra grow threads as a safety margin against level-ups mid-batch.
			const gThreads = Math.ceil(ns.growthAnalyze(server, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
			this.threads.weaken1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
			this.threads.weaken2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
			this.threads.hack = hThreads;
			this.threads.grow = gThreads;
			this.chance = ns.hackAnalyzeChance(server);
		}
	}
}
// A few minor edits here. An unused "simulation" mode, and reserved RAM on home.
/** @param {NS} ns */
class RamNet {
	#blocks = []; // One { server, ram } record per rooted server, tracking free RAM.
	#minBlockSize = Infinity;
	#maxBlockSize = 0;
	#totalRam = 0; // Sum of currently free RAM across all blocks.
	#prepThreads = 0; // How many 1.75GB worker threads the whole network could host.
	#maxRam = 0; // Sum of maximum RAM (ignores usage); used for display.
	#index = new Map(); // server name -> position in #blocks, for O(1) lookup in getBlock().
	// Simulate mode ignores running scripts. Can be used to make calculations while the batcher is operating.
	constructor(ns, servers, simulate = false) {
		for (const server of servers) {
			if (ns.hasRootAccess(server)) {
				const maxRam = ns.getServerMaxRam(server);
				// Save some extra ram on home. Clamp used ram to maxRam to prevent negative numbers.
				const reserved = (server === "home") ? RESERVED_HOME_RAM : 0;
				const used = Math.min((simulate ? 0 : ns.getServerUsedRam(server)) + reserved, maxRam);
				const ram = maxRam - used;
				if (maxRam > 0) {
					const block = { server: server, ram: ram };
					this.#blocks.push(block);
					if (ram < this.#minBlockSize) this.#minBlockSize = ram;
					if (ram > this.#maxBlockSize) this.#maxBlockSize = ram;
					this.#totalRam += ram;
					this.#maxRam += maxRam;
					this.#prepThreads += Math.floor(ram / 1.75);
				}
			}
		}
		this.#sort();
		this.#blocks.forEach((block, index) => this.#index.set(block.server, index));
	}
	// Smallest blocks first so jobs consume scraps before touching big servers; home always sorts last.
	#sort() {
		this.#blocks.sort((x, y) => {
			if (x.server === "home") return 1;
			if (y.server === "home") return -1;
			return x.ram - y.ram;
		});
	}
	get totalRam() {
		return this.#totalRam;
	}
	get maxRam() {
		return this.#maxRam;
	}
	get maxBlockSize() {
		return this.#maxBlockSize;
	}
	get prepThreads() {
		return this.#prepThreads;
	}
	// Look up a block by server name. Throws if the server was never registered.
	getBlock(server) {
		if (this.#index.has(server)) {
			return this.#blocks[this.#index.get(server)];
		} else {
			throw new Error(`Server ${server} not found in RamNet.`);
		}
	}
	// Reserve RAM for a job on the first (smallest) block that fits, recording the host on the job itself.
	assign(job) {
		const block = this.#blocks.find(block => block.ram >= job.cost);
		if (block) {
			job.server = block.server;
			block.ram -= job.cost;
			this.#totalRam -= job.cost;
			return true;
		} else return false;
	}
	// Release a finished (or cancelled) job's RAM back to its host block.
	finish(job) {
		const block = this.getBlock(job.server);
		block.ram += job.cost;
		this.#totalRam += job.cost;
	}
	// Shallow copies of the blocks, safe to mutate in planning/dry-run code.
	cloneBlocks() {
		return this.#blocks.map(block => ({ ...block }));
	}
	printBlocks(ns) {
		for (const block of this.#blocks) ns.print(block);
	}
	// Dry-run: how many complete batches (one cost entry per job type) fit into the current free RAM?
	testThreads(threadCosts) {
		const pRam = this.cloneBlocks();
		let batches = 0;
		let found = true;
		while (found) {
			for (const cost of threadCosts) {
				found = false;
				const block = pRam.find(block => block.ram >= cost);
				if (block) {
					block.ram -= cost;
					found = true;
				} else break;
			}
			if (found) batches++;
		}
		return batches;
	}
}
// Quite a bit has changed in this one. It's back to being synchronous, though it can still take a while.
// Searches for the highest greed (and lowest spacer) for which a full pipeline of batches fits in RAM,
// then writes spacer/greed/depth into metrics. Throws if not even one batch fits.
/**
 * @param {NS} ns
 * @param {Metrics} metrics
 * @param {RamNet} ramNet
 */
function optimizePeriodic(ns, metrics, ramNet) {
	const maxThreads = ramNet.maxBlockSize / 1.75;
	const maxMoney = metrics.maxMoney;
	const hPercent = ns.hackAnalyze(metrics.target);
	const wTime = ns.getWeakenTime(metrics.target);
	const minGreed = 0.001;
	const maxSpacer = wTime; // This is more of an infinite loop safety net than anything.
	const stepValue = 0.01;
	let greed = 0.95; // Capping greed a bit lower. I don't have a compelling reason for this.
	let spacer = metrics.spacer; // We'll be adjusting the spacer in low ram conditions to allow smaller depths.
	while (greed > minGreed && spacer < maxSpacer) {
		// We calculate a max depth based on the spacer, then add one as a buffer.
		const depth = Math.ceil(wTime / (4 * spacer)) + 1;
		const amount = maxMoney * greed;
		const hThreads = Math.max(Math.floor(ns.hackAnalyzeThreads(metrics.target, amount)), 1);
		const tGreed = hPercent * hThreads;
		const gThreads = Math.ceil(ns.growthAnalyze(metrics.target, maxMoney / (maxMoney - maxMoney * tGreed)) * 1.01);
		if (Math.max(hThreads, gThreads) <= maxThreads) {
			const wThreads1 = Math.max(Math.ceil(hThreads * 0.002 / 0.05), 1);
			const wThreads2 = Math.max(Math.ceil(gThreads * 0.004 / 0.05), 1);
			const threadCosts = [hThreads * 1.7, wThreads1 * 1.75, gThreads * 1.75, wThreads2 * 1.75];
			// Glad I kept these, they turned out to be useful after all. When trying to hit target depth,
			// checking that there's actually enough theoretical ram to fit them is a massive boost to performance.
			const totalCost = threadCosts.reduce((t, c) => t + c) * depth;
			if (totalCost < ramNet.totalRam) {
				// Double check that we can actually fit our threads into ram, then set our metrics and return.
				const batchCount = ramNet.testThreads(threadCosts);
				if (batchCount >= depth) {
					metrics.spacer = spacer;
					metrics.greed = greed;
					metrics.depth = depth;
					return
				}
			}
		}
		// await ns.sleep(0); // Uncomment and make the function async if you don't like the freeze on startup.
		// Decrement greed until we hit the minimum, then reset and increment spacer. We'll find a valid configuration eventually.
		greed -= stepValue;
		// NOTE(review): the reset value 0.99 exceeds the initial 0.95 cap above — looks inconsistent; confirm intent.
		if (greed < minGreed && spacer < maxSpacer) {
			greed = 0.99;
			++spacer;
		}
	}
	throw new Error("Not enough ram to run even a single batch. Something has gone seriously wrong.");
}

View File

@@ -1,42 +0,0 @@
/*
This script is completely unchanged from the last part. As a note, if you find that saves are taking a very long time
it may help to disable txt logging when you aren't actively debugging. The log files generated by this script
are quite big even when it's erasing the data on each new instance.
*/
/** @param {NS} ns */
export async function main(ns) {
	const logFile = "S4log.txt";
	ns.clear(logFile); // Clear the previous log for each instance.
	ns.disableLog("ALL");
	ns.tail();
	ns.moveTail(200, 200); // Move it out of the way so it doesn't cover up the controller.
	const logPort = ns.getPortHandle(ns.pid);
	logPort.clear();
	// Pretty simple. Just wait until something writes to the log and save the info.
	// Writes to its own console as well as a text file.
	// NOTE(review): the four counters below are only used by the commented-out desync statistics further down.
	let max = 0;
	let count = 0;
	let total = 0;
	let errors = 0;
	while (true) {
		await logPort.nextWrite();
		do {
			const data = logPort.read();
			// if (data > max) max = data;
			// if (data > 5) ++errors;
			// total += data;
			// ++count;
			// ns.clearLog();
			// ns.print(`Max desync: ${max}`);
			// ns.print(`Average desync: ${total / count}`);
			// ns.print(`Errors: ${errors}`);
			// if (data.startsWith("WARN")) ns.print(data);
			ns.print(data);
			// ns.write(logFile, data); // Comment this line out to disable txt logging.
		} while (!logPort.empty());
	}
}

View File

@@ -1,30 +0,0 @@
/*
A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations
to be as perfect as I can get them. Full comments in weaken.js as usual.
*/
/** @param {NS} ns */
export async function main(ns) {
	const start = performance.now();
	const port = ns.getPortHandle(ns.pid);
	const job = JSON.parse(ns.args[0]);
	let tDelay = 0;
	// Time until this job must start in order to land on schedule. Negative means we're already late.
	let delay = job.end - job.time - Date.now();
	if (delay < 0) {
		ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
		tDelay = -delay
		delay = 0;
	}
	// Start the grow, then report the measured startup delay to the controller BEFORE awaiting.
	const promise = ns.grow(job.target, { additionalMsec: delay });
	tDelay += performance.now() - start;
	port.write(tDelay);
	await promise;
	ns.atExit(() => {
		const end = Date.now();
		// Report our unique id (type + batch) back so the controller can free our RAM.
		if (job.report) ns.writePort(job.port, job.type + job.batch);
		// Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well.
		// ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
		// ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`);
	});
}

View File

@@ -1,30 +0,0 @@
/*
A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations
to be as perfect as I can get them. Full comments in weaken.js as usual.
*/
/** @param {NS} ns */
export async function main(ns) {
	const start = performance.now();
	const port = ns.getPortHandle(ns.pid);
	const job = JSON.parse(ns.args[0]);
	let tDelay = 0;
	// Time until this job must start in order to land on schedule. Negative means we're already late.
	let delay = job.end - job.time - Date.now();
	if (delay < 0) {
		ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
		tDelay = -delay
		delay = 0;
	}
	// Start the hack, then report the measured startup delay to the controller BEFORE awaiting.
	const promise = ns.hack(job.target, { additionalMsec: delay });
	tDelay += performance.now() - start;
	port.write(tDelay);
	await promise;
	ns.atExit(() => {
		const end = Date.now();
		// Report our unique id (type + batch) back so the controller can free our RAM.
		if (job.report) ns.writePort(job.port, job.type + job.batch);
		// Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well.
		// ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
		// ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`);
	});
}

View File

@@ -1,41 +0,0 @@
/*
A pretty big change this time. Well, big for workers anyway. I've tightened up the delay calculations
to be as perfect as I can get them. Full comments in weaken.js as usual.
*/
/** @param {NS} ns */
export async function main(ns) {
	const start = performance.now();
	const port = ns.getPortHandle(ns.pid); // We have to define this here. You'll see why in a moment.
	const job = JSON.parse(ns.args[0]);
	let tDelay = 0;
	let delay = job.end - job.time - Date.now();
	// Don't report delay right away.
	if (delay < 0) {
		ns.tprint(`WARN: Batch ${job.batch} ${job.type} was ${-delay}ms late. (${job.end})\n`);
		tDelay = -delay
		delay = 0;
	}
	// The actual function call can take some time, so instead of awaiting on it right away, we save the promise for later.
	const promise = ns.weaken(job.target, { additionalMsec: delay });
	// Then after calling the hack function, we calculate our final delay and report it to the controller.
	tDelay += performance.now() - start;
	// The ns object is tied up by the promise, so invoking it now would cause a concurrency error.
	// That's why we fetched this handle earlier.
	port.write(tDelay);
	// Then we finally await the promise. This should give millisecond-accurate predictions for the end time of a job.
	await promise;
	ns.atExit(() => {
		const end = Date.now();
		// Report our unique id (type + batch) back so the controller can free our RAM.
		if (job.report) ns.writePort(job.port, job.type + job.batch);
		// Uncomment one of these if you want to log completed jobs. Make sure to uncomment the appropriate lines in the controller as well.
		// ns.tprint(`Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end).toString().slice(-6)}\n`);
		// ns.writePort(job.log, `Batch ${job.batch}: ${job.type} finished at ${end.toString().slice(-6)}/${Math.round(job.end + tDelay).toString().slice(-6)}\n`);
	});
}

View File

@@ -1,420 +0,0 @@
/*
We've got a brand new class to look at, but the rest of the file remains unchanged.
*/
// Entry-point stub: this file is meant to be imported for its exports, not executed directly.
/** @param {NS} ns */
export async function main(ns) {
	ns.tprint("This is just a function library, it doesn't do anything.");
}
/*
This is an overengineered abomination of a custom data structure. It is essentially a double-ended queue,
but also has a Map stapled to it, just in case we need to access items by id (we don't.)
The idea is that it can fetch/peek items from the front or back with O(1) timing. This gets around the issue of
dynamic arrays taking O(n) time to shift, which is terrible behavior for very long queues like the one we're using.
Implemented as a fixed-size circular buffer; stored items are expected to have `id` and `status` properties.
*/
export class Deque {
	#capacity = 0; // The maximum length.
	#length = 0; // The actual number of items in the queue
	#front = 0; // The index of the "head" where data is read from the queue.
	#deleted = 0; // The number of "dead" items in the queue. These occur when items are deleted by index. They are bad.
	#elements; // An inner array to store the data.
	#index = new Map(); // A hash table to track items by ID. Try not to delete items using this, it's bad.
	// Create a new queue with a specific capacity.
	constructor(capacity) {
		this.#capacity = capacity;
		this.#elements = new Array(capacity);
	}
	// You can also convert arrays.
	static fromArray(array, overallocation = 0) {
		const result = new Deque(array.length + overallocation);
		array.forEach(item => result.push(item));
		return result;
	}
	// Deleted items don't count towards length, but they still take up space in the array until they can be cleared.
	// Seriously, don't use the delete function unless it's absolutely necessary.
	get size() {
		return this.#length - this.#deleted;
	}
	isEmpty() {
		return this.#length - this.#deleted === 0;
	}
	// Again, "deleted" items still count towards this. Use caution.
	isFull() {
		return this.#length === this.#capacity;
	}
	// The "tail" where data is typically written to.
	// Unlike the front, which points at the first piece of data, this points at the first empty slot.
	get #back() {
		return (this.#front + this.#length) % this.#capacity;
	}
	// Push a new element into the queue. Throws when full (amortized O(1)).
	push(value) {
		if (this.isFull()) {
			throw new Error("The deque is full. You cannot add more items.");
		}
		this.#elements[this.#back] = value;
		this.#index.set(value.id, this.#back);
		++this.#length;
	}
	// Pop an item off the back of the queue.
	pop() {
		while (!this.isEmpty()) {
			--this.#length;
			const item = this.#elements[this.#back];
			this.#elements[this.#back] = undefined; // Free up the item for garbage collection.
			this.#index.delete(item.id); // Don't confuse index.delete() with this.delete()
			if (item.status !== "deleted") return item; // Clear any "deleted" items we encounter.
			else --this.#deleted; // If you needed another reason to avoid deleting by ID, this breaks the O(1) time complexity.
		}
		throw new Error("The deque is empty. You cannot delete any items.");
	}
	// Shift an item off the front of the queue. This is the main method for accessing data.
	shift() {
		while (!this.isEmpty()) {
			// Our pointer already knows exactly where the front of the queue is. This is much faster than the array equivalent.
			const item = this.#elements[this.#front];
			this.#elements[this.#front] = undefined;
			this.#index.delete(item.id);
			// Move the head up and wrap around if we reach the end of the array. This is essentially a circular buffer.
			this.#front = (this.#front + 1) % this.#capacity;
			--this.#length;
			if (item.status !== "deleted") return item;
			else --this.#deleted;
		}
		throw new Error("The deque is empty. You cannot delete any items.");
	}
	// Place an item at the front of the queue. Slightly slower than pushing, but still faster than doing it on an array.
	unshift(value) {
		if (this.isFull()) {
			throw new Error("The deque is full. You cannot add more items.");
		}
		this.#front = (this.#front - 1 + this.#capacity) % this.#capacity;
		this.#elements[this.#front] = value;
		this.#index.set(value.id, this.#front);
		++this.#length;
	}
	// Peeking at the front is pretty quick, since the head is already looking at it. We just have to clear those pesky "deleted" items first.
	peekFront() {
		if (this.isEmpty()) {
			throw new Error("The deque is empty. You cannot peek.");
		}
		while (this.#elements[this.#front].status === "deleted") {
			this.#index.delete(this.#elements[this.#front]?.id);
			this.#elements[this.#front] = undefined;
			this.#front = (this.#front + 1) % this.#capacity;
			--this.#deleted;
			--this.#length;
			if (this.isEmpty()) {
				throw new Error("The deque is empty. You cannot peek.");
			}
		}
		return this.#elements[this.#front];
	}
	// Peeking at the back is ever so slightly slower, since we need to recalculate the pointer.
	// It's a tradeoff for the faster push function, and it's a very slight difference either way.
	peekBack() {
		if (this.isEmpty()) {
			throw new Error("The deque is empty. You cannot peek.");
		}
		let back = (this.#front + this.#length - 1) % this.#capacity;
		while (this.#elements[back].status === "deleted") {
			this.#index.delete(this.#elements[back].id);
			this.#elements[back] = undefined;
			back = (back - 1 + this.#capacity) % this.#capacity;
			--this.#deleted;
			--this.#length;
			if (this.isEmpty()) {
				throw new Error("The deque is empty. You cannot peek.");
			}
		}
		return this.#elements[back];
	}
	// Fill the queue with a single value.
	fill(value) {
		while (!this.isFull()) {
			this.push(value);
		}
	}
	// Empty the whole queue.
	clear() {
		while (!this.isEmpty()) {
			this.pop();
		}
	}
	// Check if an ID exists.
	exists(id) {
		return this.#index.has(id);
	}
	// Fetch an item by ID
	get(id) {
		let pos = this.#index.get(id);
		return pos !== undefined ? this.#elements[pos] : undefined;
	}
	// DON'T. Marks the item "deleted" in place; it still occupies a slot until popped/shifted past.
	delete(id) {
		let item = this.get(id);
		if (item !== undefined) {
			item.status = "deleted";
			++this.#deleted;
			return item;
		} else {
			throw new Error("Item not found in the deque.");
		}
	}
}
// Depth-first traversal of the server network starting from `hostname`.
// The lambda predicate decides which servers end up in the returned list; it can also be used
// to piggyback side-effect work (rooting, copying scripts) onto the scan.
// Note: for non-home servers, the first scan result is the parent node and is skipped.
/** @param {NS} ns */
export function getServers(ns, lambdaCondition = () => true, hostname = "home", servers = [], visited = []) {
	if (visited.includes(hostname)) return;
	// Iterative preorder DFS with an explicit stack (children pushed in reverse to keep scan order).
	const stack = [hostname];
	while (stack.length > 0) {
		const current = stack.pop();
		if (visited.includes(current)) continue;
		visited.push(current);
		if (lambdaCondition(current)) servers.push(current);
		const neighbors = ns.scan(current);
		if (current !== "home") neighbors.shift();
		for (let i = neighbors.length - 1; i >= 0; --i) stack.push(neighbors[i]);
	}
	return servers;
}
// Here are a couple of my own getServers modules.
// This one finds the best target for hacking. It tries to balance expected return with time taken.
// Returns the better of `server` and the current `target`; with Formulas.exe available (`forms`),
// scores use exact weaken time and hack chance at minimum security.
/** @param {NS} ns */
export function checkTarget(ns, server, target = "n00dles", forms = false) {
	if (!ns.hasRootAccess(server)) return target;
	const player = ns.getPlayer();
	const serverSim = ns.getServer(server);
	const pSim = ns.getServer(target);
	let previousScore;
	let currentScore;
	// Only consider servers we can realistically hack (half our level without Formulas, full level with).
	if (serverSim.requiredHackingSkill <= player.skills.hacking / (forms ? 1 : 2)) {
		if (forms) {
			// Score = max money per weaken-millisecond, weighted by hack chance, evaluated at min security.
			serverSim.hackDifficulty = serverSim.minDifficulty;
			pSim.hackDifficulty = pSim.minDifficulty;
			previousScore = pSim.moneyMax / ns.formulas.hacking.weakenTime(pSim, player) * ns.formulas.hacking.hackChance(pSim, player);
			currentScore = serverSim.moneyMax / ns.formulas.hacking.weakenTime(serverSim, player) * ns.formulas.hacking.hackChance(serverSim, player);
		} else {
			const weight = (serv) => {
				// Calculate the difference between max and available money
				let diff = serv.moneyMax - serv.moneyAvailable;
				// Calculate the scaling factor as the ratio of the difference to the max money
				// The constant here is just an adjustment to fine tune the influence of the scaling factor
				let scalingFactor = diff / serv.moneyMax * 0.95;
				// Adjust the weight based on the difference, applying the scaling penalty
				return (serv.moneyMax / serv.minDifficulty) * (1 - scalingFactor);
			}
			previousScore = weight(pSim)
			currentScore = weight(serverSim)
		}
		if (currentScore > previousScore) target = server;
	}
	return target;
}
// Copies each listed script to a server. Files already present are skipped unless `overwrite`
// is set, and nothing is copied to servers we lack root access on.
/** @param {NS} ns */
export function copyScripts(ns, server, scripts, overwrite = false) {
	for (const file of scripts) {
		const missing = !ns.fileExists(file, server);
		if ((missing || overwrite) && ns.hasRootAccess(server)) ns.scp(file, server);
	}
}
// A generic function to check that a given server is prepped. Mostly just a convenience.
// "Prepped" means money is exactly at max and security is within a small floating-point
// tolerance of the minimum. (Idiom fix: return the boolean directly instead of `? true : false`.)
export function isPrepped(ns, server) {
	const tolerance = 0.0001;
	const maxMoney = ns.getServerMaxMoney(server);
	const money = ns.getServerMoneyAvailable(server);
	const minSec = ns.getServerMinSecurityLevel(server);
	const sec = ns.getServerSecurityLevel(server);
	// Money uses exact equality; security allows wiggle room for float error.
	return money === maxMoney && Math.abs(sec - minSec) < tolerance;
}
/*
This prep function isn't part of the tutorial, but the rest of the code wouldn't work without it.
I don't make any guarantees, but I've been using it and it's worked well enough. I'll comment it anyway.
The prep strategy uses a modified proto-batching technique, which will be covered in part 2.
*/
// Repeatedly fires weaken/grow waves at values.target until isPrepped() holds, then returns true.
/** @param {NS} ns */
export async function prep(ns, values, ramNet) {
	const maxMoney = values.maxMoney;
	const minSec = values.minSec;
	let money = values.money;
	let sec = values.sec;
	// Each loop iteration plans and launches one wave, then waits for its final weaken to land.
	while (!isPrepped(ns, values.target)) {
		const wTime = ns.getWeakenTime(values.target);
		const gTime = wTime * 0.8;
		const dataPort = ns.getPortHandle(ns.pid);
		dataPort.clear();
		const pRam = ramNet.cloneBlocks();
		const maxThreads = Math.floor(ramNet.maxBlockSize / 1.75);
		const totalThreads = ramNet.prepThreads;
		let wThreads1 = 0;
		let wThreads2 = 0;
		let gThreads = 0;
		let batchCount = 1;
		let script, mode;
		/*
		Modes:
		0: Security only
		1: Money only
		2: One shot
		*/
		if (money < maxMoney) {
			gThreads = Math.ceil(ns.growthAnalyze(values.target, maxMoney / money));
			wThreads2 = Math.ceil(ns.growthAnalyzeSecurity(gThreads) / 0.05);
		}
		if (sec > minSec) {
			wThreads1 = Math.ceil((sec - minSec) * 20);
			if (!(wThreads1 + wThreads2 + gThreads <= totalThreads && gThreads <= maxThreads)) {
				gThreads = 0;
				wThreads2 = 0;
				batchCount = Math.ceil(wThreads1 / totalThreads);
				if (batchCount > 1) wThreads1 = totalThreads;
				mode = 0;
			} else mode = 2;
		} else if (gThreads > maxThreads || gThreads + wThreads2 > totalThreads) {
			mode = 1;
			const oldG = gThreads;
			wThreads2 = Math.max(Math.floor(totalThreads / 13.5), 1);
			gThreads = Math.floor(wThreads2 * 12.5);
			batchCount = Math.ceil(oldG / gThreads);
		} else mode = 2;
		// Big buffer here, since all the previous calculations can take a while. One second should be more than enough.
		const wEnd1 = Date.now() + wTime + 1000;
		const gEnd = wEnd1 + values.spacer;
		const wEnd2 = gEnd + values.spacer;
		// "metrics" here is basically a mock Job object. Again, this is just an artifact of repurposed old code.
		const metrics = {
			batch: "prep",
			target: values.target,
			type: "none",
			time: 0,
			end: 0,
			port: ns.pid,
			log: values.log,
			report: false
		};
		// Actually assigning threads. We actually allow grow threads to be spread out in mode 1.
		// This is because we don't mind if the effect is a bit reduced from higher security unlike a normal batcher.
		// We're not trying to grow a specific amount, we're trying to grow as much as possible.
		for (const block of pRam) {
			while (block.ram >= 1.75) {
				const bMax = Math.floor(block.ram / 1.75)
				let threads = 0;
				if (wThreads1 > 0) {
					script = "S4tWeaken.js";
					metrics.type = "pWeaken1";
					metrics.time = wTime;
					metrics.end = wEnd1;
					threads = Math.min(wThreads1, bMax);
					// Only the very last weaken of the wave reports back, so we know when the wave is done.
					if (wThreads2 === 0 && wThreads1 - threads <= 0) metrics.report = true;
					wThreads1 -= threads;
				} else if (wThreads2 > 0) {
					script = "S4tWeaken.js";
					metrics.type = "pWeaken2";
					metrics.time = wTime;
					metrics.end = wEnd2;
					threads = Math.min(wThreads2, bMax);
					if (wThreads2 - threads === 0) metrics.report = true;
					wThreads2 -= threads;
				} else if (gThreads > 0 && mode === 1) {
					script = "S4tGrow.js";
					metrics.type = "pGrow";
					metrics.time = gTime;
					metrics.end = gEnd;
					threads = Math.min(gThreads, bMax);
					metrics.report = false;
					gThreads -= threads;
				} else if (gThreads > 0 && bMax >= gThreads) {
					script = "S4tGrow.js";
					metrics.type = "pGrow";
					metrics.time = gTime;
					metrics.end = gEnd;
					threads = gThreads;
					metrics.report = false;
					gThreads = 0;
				} else break;
				metrics.server = block.server;
				const pid = ns.exec(script, block.server, { threads: threads, temporary: true }, JSON.stringify(metrics));
				if (!pid) throw new Error("Unable to assign all jobs.");
				block.ram -= 1.75 * threads;
			}
		}
		// Fancy UI stuff to update you on progress.
		const tEnd = ((mode === 0 ? wEnd1 : wEnd2) - Date.now()) * batchCount + Date.now();
		const timer = setInterval(() => {
			ns.clearLog();
			switch (mode) {
				case 0:
					ns.print(`Weakening security on ${values.target}...`);
					break;
				case 1:
					ns.print(`Maximizing money on ${values.target}...`);
					break;
				case 2:
					ns.print(`Finalizing preparation on ${values.target}...`);
			}
			ns.print(`Security: +${ns.formatNumber(sec - minSec, 3)}`);
			ns.print(`Money: \$${ns.formatNumber(money, 2)}/${ns.formatNumber(maxMoney, 2)}`);
			const time = tEnd - Date.now();
			ns.print(`Estimated time remaining: ${ns.tFormat(time)}`);
			ns.print(`~${batchCount} ${(batchCount === 1) ? "batch" : "batches"}.`);
		}, 200);
		ns.atExit(() => clearInterval(timer));
		// Wait for the last weaken to finish.
		do await dataPort.nextWrite(); while (!dataPort.read().startsWith("pWeaken"));
		clearInterval(timer);
		await ns.sleep(100);
		// Re-sample the server so the next loop iteration (and the UI) uses fresh numbers.
		money = ns.getServerMoneyAvailable(values.target);
		sec = ns.getServerSecurityLevel(values.target);
	}
	return true;
}
// I don't actually use this anywhere in the code. It's a debugging tool that I use to test the runtimes of functions.
/**
 * Debugging helper: measures the average wall-clock runtime of `lambda`.
 * The callback receives the iteration index (0..999) on each call.
 * @param {(i: number) => void} lambda - function to time
 * @returns {number} mean runtime in milliseconds over 1000 runs
 */
export function benchmark(lambda) {
    const runs = 1000;
    let total = 0;
    // Bug fix: the loop previously ran 1001 times (`i <= 1000`) while the
    // accumulated time was divided by 1000, skewing the reported average.
    for (let i = 0; i < runs; ++i) {
        const start = performance.now();
        lambda(i);
        total += performance.now() - start;
    }
    return total / runs;
}

View File

@@ -1,42 +0,0 @@
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, purchaseAndUpgradeServers } from "/RamsesUtils.js";
/** @param {NS} ns */
export async function main(ns) {
    // Orchestrator: scans the network, picks a hack target, roots servers,
    // launches the batcher, and finally buys/upgrades purchased servers.
    // Worker scripts deployed to rooted servers: [batcher, grow, weaken, hack].
    const funnyScript = ["batch.js", "Ramses-grow.js", "Ramses-weaken.js", "Ramses-hack.js"];
    //write function to purchase scripts from tor network and rerun getCracks() then recrack and reroot
    // Available port-opening programs; their count caps how many ports we can open.
    let cracks = {};
    cracks = getCracks(ns);
    let maxPorts = Object.keys(cracks).length;
    // Writes the reachable-server inventory to serverList.txt (read back below).
    scanServerList(ns);
    // Force an easy target while hacking skill is low — TODO confirm 200 is the intended cutoff.
    let manualTargetOverride = "";
    if (ns.getHackingLevel() < 200) {
        manualTargetOverride = "n00dles";
    };
    // Persists the chosen target as JSON in bestTarget.txt.
    findBestTarget(ns, 999, maxPorts, ns.getHackingLevel(), manualTargetOverride);
    let bestTarget = ns.read("bestTarget.txt")
    ns.tprint("Best Target: " + bestTarget);
    ns.tprint(Object.keys(JSON.parse(ns.read("serverList.txt"))).length);
    crackingAndRooting(ns, cracks, funnyScript, true);
    // Launch the batcher on home against the selected target.
    // NOTE(review): the meaning of the literal args (500, true) is not visible here — verify against batch.js.
    ns.exec(funnyScript[0], "home", 1, JSON.parse(bestTarget).serverName, 500, true);
    // Optional args[0] === true re-runs targeting and redeploys scripts to every known server.
    let reset = ns.args[0];
    ns.print(reset);
    if (reset === true) {
        ns.tprint("reset === true")
        findBestTarget(ns, 999, maxPorts, ns.getHackingLevel(), manualTargetOverride);
        let serverList = JSON.parse(ns.read("serverList.txt"));
        for (const [name, entry] of Object.entries(serverList)) {
            copyAndRunScript(ns, funnyScript, name);
        }
    }
    /*let serverListForFiles = JSON.parse(ns.read("serverList.txt"));
    for (const [name2, entry2] of Object.entries(serverListForFiles)) {
        ns.tprint(name2 + " Files: " + entry2.serverFiles)
    }*/
    //await ns.sleep(500000);
    await purchaseAndUpgradeServers(ns);
}

View File

@@ -1,21 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
    // Prints a coding contract's metadata for manual inspection.
    const sTarget = ns.args[0]; // target server which has the contract
    const sContract = ns.args[1]; // target contract file
    //ns.tprint(ns.codingcontract.getContractTypes());
    //ns.codingcontract.createDummyContract();
    const sContractType = ns.codingcontract.getContractType(sContract, sTarget);
    const sContractData = ns.codingcontract.getData(sContract, sTarget);
    const sContractDescription = ns.codingcontract.getDescription(sContract, sTarget);
    const sContractTries = ns.codingcontract.getNumTriesRemaining(sContract, sTarget);
    ns.tprint("sContractType = " + sContractType);
    ns.tprint("sContractData = " + sContractData);
    ns.tprint("sContractDescription = " + sContractDescription);
    ns.tprint("sContractTries = " + sContractTries);
    // Removed dead statement `JSON.stringify(sContractType, sContractType, true);`
    // — its result was discarded and its replacer/space arguments were malformed.
}

View File

@@ -1,16 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
    // Ensure each city office has at least one employee in each core role.
    const cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
    // NOTE(review): ns.corporation.getOffice expects a *division* name — confirm "ChemNeo" is the division.
    const corpName = "ChemNeo";
    for (const city of cities) {
        const currentOffice = ns.corporation.getOffice(corpName, city);
        // Only hire while the office has free seats.
        if (currentOffice.numEmployees < currentOffice.size) {
            // Plain `if` statements replace the original side-effect ternaries.
            if (currentOffice.employeeJobs.Operations < 1) ns.corporation.hireEmployee(corpName, city, "Operations");
            if (currentOffice.employeeJobs.Engineer < 1) ns.corporation.hireEmployee(corpName, city, "Engineer");
            if (currentOffice.employeeJobs.Business < 1) ns.corporation.hireEmployee(corpName, city, "Business");
            if (currentOffice.employeeJobs.Management < 1) ns.corporation.hireEmployee(corpName, city, "Management");
        }
    }
}

View File

@@ -1,6 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
    // Expected args: [corporation/division name, city]; currently unused.
    const [corpName, city] = ns.args;
    // Smart Supply activation is intentionally disabled for now:
    //ns.corporation.setSmartSupply(corpName, city, true);
    return true;
}

View File

@@ -1,12 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
    // Grow every city office of the "ChemNeo" division to at least 4 seats.
    const cities = ["Sector-12", "Aevum", "Volhaven", "Chongqing", "New Tokyo", "Ishima"];
    const corpName = "ChemNeo";
    for (const city of cities) {
        const size = ns.corporation.getOffice(corpName, city).size;
        // Buy exactly the number of seats needed to reach 4.
        if (size < 4) {
            ns.corporation.upgradeOfficeSize(corpName, city, 4 - size);
        }
    }
}

View File

@@ -1,8 +0,0 @@
/** @param {NS} ns */
export async function main(ns) {
    // Kill every running script on each server listed in serverList.txt.
    const serverList = JSON.parse(ns.read("serverList.txt"));
    for (const name of Object.keys(serverList)) {
        ns.killall(name, true);
    }
}

View File

@@ -1,5 +0,0 @@
import { getCracks, scanServerList, findBestTarget, crackingAndRooting, copyAndRunScript, purchaseAndUpgradeServers } from "/RamsesUtils.js";
/** @param {NS} ns */
export async function main(ns) {
    // Thin entry point: delegate server purchasing/upgrading to the shared helper.
    await purchaseAndUpgradeServers(ns);
}

View File

@@ -1 +0,0 @@
{"home":{"n00dles":{"CSEC":{"omega-net":{}}},"foodnstuff":{"nectar-net":{"silver-helix":{"the-hub":{},"netlink":{"zb-institute":{"lexo-corp":{}},"catalyst":{},"I.I.I.I":{}}},"phantasy":{"computek":{"summit-uni":{}},"avmnite-02h":{}}}},"sigma-cosmetics":{"max-hardware":{"neo-net":{"johnson-ortho":{},"crush-fitness":{"rothman-uni":{"aevum-police":{"snap-fitness":{"omnia":{"solaris":{}}}},"millenium-fitness":{}},"syscore":{"rho-construction":{"galactic-cyber":{"unitalife":{"univ-energy":{"nova-med":{}}}},"global-pharm":{}},"alpha-ent":{"aerocorp":{"deltaone":{"defcomm":{"taiyang-digital":{"titan-labs":{},"applied-energetics":{"stormtech":{"4sigma":{}}},"run4theh111z":{"fulcrumtech":{"omnitek":{"powerhouse-fitness":{}},"kuai-gong":{"blade":{"fulcrumassets":{}},"nwo":{"ecorp":{}},"clarkinc":{"megacorp":{},"The-Cave":{}}}},"vitalife":{".":{"b-and-a":{}}}}}},"icarus":{"infocomm":{"microdyne":{"helios":{}}}},"zeus-med":{"zb-def":{}}}}}}}}}},"joesguns":{},"hong-fang-tea":{"zer0":{}},"harakiri-sushi":{},"iron-gym":{},"pserv-00":{},"pserv-01":{},"pserv-02":{},"pserv-03":{},"pserv-04":{},"pserv-05":{},"pserv-06":{},"pserv-07":{},"pserv-08":{},"pserv-09":{},"pserv-10":{},"pserv-11":{},"pserv-12":{},"pserv-13":{},"pserv-14":{},"pserv-15":{},"pserv-16":{},"pserv-17":{},"pserv-18":{},"pserv-19":{},"pserv-20":{},"pserv-21":{},"pserv-22":{},"pserv-23":{},"pserv-24":{},"darkweb":{}}}

View File

@@ -1 +0,0 @@
{"serverName":"the-hub","maxRam":32,"maxMoney":4832749575,"minSec":24,"minPorts":2,"minHackLvl":291,"rootAccess":true,"openPorts":0,"serverOrgs":"The Hub","serverCores":4,"serverFiles":["factionboost.js"]}

View File

@@ -0,0 +1,5 @@
/** @param {NS} ns */
export async function main(ns) {
    // Buy the one-time "Smart Supply" corporation unlock, then signal the
    // parent script (listening on this script's PID port) that we finished.
    ns.corporation.purchaseUnlock("Smart Supply"); // semicolon added; no ASI reliance
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,6 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city]. Expand the division into the city,
    // then report completion on this script's PID port.
    const [sDivName, sCity] = ns.args;
    ns.corporation.expandCity(sDivName, sCity);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,6 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [new division name, industry type]. Create the division,
    // then acknowledge on this script's PID port.
    const [newDivName, industry] = ns.args;
    ns.corporation.expandIndustry(industry, newDivName);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,7 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [corporation name]. The second argument (`true`) selects the
    // funded creation path. Acknowledge on this script's PID port when done.
    const [sCorpName] = ns.args;
    ns.corporation.createCorporation(sCorpName, true);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,4 @@
/** @param {NS} ns */
export async function main(ns) {
    // Publish the full corporation info object on this script's PID port.
    const corpData = ns.corporation.getCorporation();
    ns.writePort(ns.pid, corpData);
}

View File

@@ -0,0 +1,4 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name]. Publish that division's info object on the PID port.
    const division = ns.corporation.getDivision(ns.args[0]);
    ns.writePort(ns.pid, division);
}

View File

@@ -0,0 +1,5 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city]. Publish the office info object on the PID port.
    const [sDivName, sCity] = ns.args;
    const office = ns.corporation.getOffice(sDivName, sCity);
    ns.writePort(ns.pid, office);
}

View File

@@ -0,0 +1,9 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city]. Publish the warehouse info on the PID port,
    // or `false` when that city has no warehouse yet.
    const [sDivName, sCity] = ns.args;
    const hasWarehouse = ns.corporation.hasWarehouse(sDivName, sCity);
    ns.writePort(ns.pid, hasWarehouse ? ns.corporation.getWarehouse(sDivName, sCity) : false);
}

View File

@@ -0,0 +1,5 @@
/** @param {NS} ns */
export async function main(ns) {
    // Report on the PID port whether the player already owns a corporation.
    const exists = ns.corporation.hasCorporation();
    ns.writePort(ns.pid, exists);
}

View File

@@ -0,0 +1,4 @@
/** @param {NS} ns */
export async function main(ns) {
    // Report on the PID port whether the "Smart Supply" unlock has been purchased.
    const unlocked = ns.corporation.hasUnlock("Smart Supply");
    ns.writePort(ns.pid, unlocked);
}

View File

@@ -0,0 +1,12 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city, JSON-serialized office snapshot].
    // Fill each core role that currently has no employee, while seats remain,
    // then acknowledge on this script's PID port.
    let [sDivName, sCity, oCurrentOffice] = ns.args;
    oCurrentOffice = JSON.parse(oCurrentOffice);
    if (oCurrentOffice.numEmployees < oCurrentOffice.size) {
        // Plain `if` statements replace the original side-effect ternaries.
        if (oCurrentOffice.employeeJobs.Operations < 1) ns.corporation.hireEmployee(sDivName, sCity, "Operations");
        if (oCurrentOffice.employeeJobs.Engineer < 1) ns.corporation.hireEmployee(sDivName, sCity, "Engineer");
        if (oCurrentOffice.employeeJobs.Business < 1) ns.corporation.hireEmployee(sDivName, sCity, "Business");
        if (oCurrentOffice.employeeJobs.Management < 1) ns.corporation.hireEmployee(sDivName, sCity, "Management");
    }
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,6 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city]. Buy a warehouse there, then ack via the PID port.
    const [sDivName, sCity] = ns.args;
    ns.corporation.purchaseWarehouse(sDivName, sCity);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,7 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city, JSON object with an sMaterialName field].
    // Cap production of that material at 50 per tick, then ack via the PID port.
    const [sDivName, sCity, sGoodsJson] = ns.args;
    const goods = JSON.parse(sGoodsJson);
    ns.corporation.limitMaterialProduction(sDivName, sCity, goods.sMaterialName, 50);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,7 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city, JSON {sMaterialName, yAmount, yPrice}].
    // Set the sell order for the material, then ack via the PID port.
    const [sDivName, sCity, sGoodsJson] = ns.args;
    const goods = JSON.parse(sGoodsJson);
    ns.corporation.sellMaterial(sDivName, sCity, goods.sMaterialName, goods.yAmount, goods.yPrice);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,6 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city]. Enable Smart Supply for that city's
    // warehouse, then ack via the PID port.
    const [sDivName, sCity] = ns.args;
    ns.corporation.setSmartSupply(sDivName, sCity, true);
    ns.writePort(ns.pid, true);
}

View File

@@ -0,0 +1,8 @@
/** @param {NS} ns */
export async function main(ns) {
    // args: [division name, city, target office size, current office size].
    // Upgrade the office up to the target (no-op when already big enough), then ack.
    const [sDivName, sCity, nTargetSize, nCurrentSize] = ns.args;
    if (nCurrentSize < nTargetSize) {
        ns.corporation.upgradeOfficeSize(sDivName, sCity, nTargetSize - nCurrentSize);
    }
    ns.writePort(ns.pid, true);
}

View File

@@ -1,40 +0,0 @@
# Bitburner
[![Join Discord](https://img.shields.io/discord/415207508303544321)](https://discord.gg/TFc3hKD)
[![Build Status](https://github.com/bitburner-official/bitburner-src/actions/workflows/ci.yml/badge.svg?branch=dev)](https://github.com/bitburner-official/bitburner-src/actions/workflows/ci.yml)
Bitburner is a programming-based [incremental game](https://en.wikipedia.org/wiki/Incremental_game)
that revolves around hacking and cyberpunk themes.
The game can be played at https://bitburner-official.github.io/ (release build), https://bitburner-official.github.io/bitburner-src/ (development build), or installed through [Steam](https://store.steampowered.com/app/1812820/Bitburner/).
The location of the release build may change in the near future.
See the [frequently asked questions](./doc/FAQ.md) for more information. To discuss the game or get help, join the [official Discord server](https://discord.gg/TFc3hKD).
# Documentation
The game's official documentation can be found in-game.
The [in-game documentation](./markdown/bitburner.md) is generated from the [TypeScript definitions](./src/ScriptEditor/NetscriptDefinitions.d.ts).
Anyone is welcome to contribute to the documentation by editing the [source
files](/src/Documentation/doc) and then making a pull request with your contributions.
For further guidance, please refer to the "As A Documenter" section of
[CONTRIBUTING](./doc/CONTRIBUTING.md#as-a-documenter).
# Contribution
There are many ways to contribute to the game. It can be as simple as fixing
a typo, correcting a bug, or improving the UI. For guidance on doing so,
please refer to the [CONTRIBUTING](./doc/CONTRIBUTING.md) document.
You will retain all ownership of the Copyright of any contributions you make,
and will have the same rights to use or license your contributions. By
submitting a pull request you agree to grant me perpetual, worldwide,
non-exclusive, transferable, royalty-free, and irrevocable rights to use,
publish, and distribute your contributions to the project. A formal
Contributor's License Agreement will be drawn up in the future.
If you would like to make significant contributions to the project as a
collaborator, please reach out in #suggestions or #development on Discord to
help coordinate the effort.

View File

@@ -8,6 +8,7 @@ export async function main(ns) {
const sPrepScript = "lowram/Lowprep.js"; const sPrepScript = "lowram/Lowprep.js";
const sRunScript = "lowram/Lowrun.js"; const sRunScript = "lowram/Lowrun.js";
const sLowGrowScript = "lowram/Lowgrow.js"; const sLowGrowScript = "lowram/Lowgrow.js";
const sDistScript = "testdistribute.js";
const sGrowScript = "RMgrow.js"; const sGrowScript = "RMgrow.js";

View File

@@ -8,17 +8,23 @@ export async function main(ns) {
const sGrowScript = "RMgrow.js"; const sGrowScript = "RMgrow.js";
const sWeakenScript = "RMweaken.js"; const sWeakenScript = "RMweaken.js";
const sServerListFile = "serverList.txt"; const sServerListFile = "serverList.txt";
const sDistScript = "testdistribute.js";
const nHackScriptRAM = 1.75; const nHackScriptRAM = 1.75;
const nGrowScriptRAM = 1.7; const nGrowScriptRAM = 1.7;
const nWeakenScriptRAM = 1.75; const nWeakenScriptRAM = 1.75;
const nThisScriptRAM = 4.65; const nThisScriptRAM = 4.70;
await ns.sleep(1);
while (true) { while (true) {
//let nMaxRAM = ns.getServerMaxRam("home"); let nMaxRAM = ns.getServerMaxRam("home");
let oServerlist = JSON.parse(ns.read(sServerListFile)); let oServerlist = JSON.parse(ns.read(sServerListFile));
//let nMaxRAM = oServerlist[1].nMaxRAM;
ns.tprint("nMaxRAM = " + nMaxRAM);
//let nMaxRAM = [name, entry] of Object.entries(oServerlist); //let nMaxRAM = [name, entry] of Object.entries(oServerlist);
let nMaxRAM = oServerlist["home"].maxRam; //let nMaxRAM = oServerlist["home"].maxRam;
//ns.print("nMaxRAM = " + nMaxRAM); //ns.print("nMaxRAM = " + nMaxRAM);
let nWeakenTime = ns.getWeakenTime(sTarget); let nWeakenTime = ns.getWeakenTime(sTarget);
@@ -72,7 +78,7 @@ export async function main(ns) {
nCheckRAM = nThisScriptRAM + (nTempHackThreads * nHackScriptRAM) + (nTempGrowThreadsINT * nGrowScriptRAM) + (nTempWeakenThreadsINT * nWeakenScriptRAM); nCheckRAM = nThisScriptRAM + (nTempHackThreads * nHackScriptRAM) + (nTempGrowThreadsINT * nGrowScriptRAM) + (nTempWeakenThreadsINT * nWeakenScriptRAM);
//ns.print("nCheckRAM = " + nCheckRAM); //ns.print("nCheckRAM = " + nCheckRAM);
if (nCheckRAM < nMaxRAM) { if (nCheckRAM < nMaxRAM) {
nUsedRAM = nCheckRAM; nUsedRAM = nCheckRAM;
nHackThreads = nTempHackThreads; nHackThreads = nTempHackThreads;

View File

@@ -12,6 +12,7 @@ export async function main(ns) {
const sPrepScript = "lowram/Lowprep.js"; const sPrepScript = "lowram/Lowprep.js";
const sRunScript = "lowram/Lowrun.js"; const sRunScript = "lowram/Lowrun.js";
const sGrowScript = "lowram/Lowgrow.js"; const sGrowScript = "lowram/Lowgrow.js";
const sDistScript = "testdistribute.js";
//declare variables //declare variables
const sWeakenScript = "RMweaken.js"; const sWeakenScript = "RMweaken.js";
@@ -39,8 +40,11 @@ export async function main(ns) {
ns.print(nThreads + " of " + sWeakenScript + " requires " + (ns.getScriptRam(sWeakenScript, "home") * nThreads) + " GB of RAM"); ns.print(nThreads + " of " + sWeakenScript + " requires " + (ns.getScriptRam(sWeakenScript, "home") * nThreads) + " GB of RAM");
ns.print("weakening will take " + (nDelay / 1000 / 60) + " minutes"); ns.print("weakening will take " + (nDelay / 1000 / 60) + " minutes");
ns.run(sDistScript, 1, sWeakenScript, nThreads, sTarget, false, 0);
await ns.sleep(nDelay+10);
/*
if ((ns.getServerMaxRam(oHome.hostname) - ns.getServerUsedRam(oHome.hostname)) >= (ns.getScriptRam(sWeakenScript, "home") * nThreads)) { if ((ns.getServerMaxRam(oHome.hostname) - ns.getServerUsedRam(oHome.hostname)) >= (ns.getScriptRam(sWeakenScript, "home") * nThreads)) {
nWeakenPID = ns.run(sWeakenScript, nThreads, sTarget); nWeakenPID = ns.exec(sWeakenScript, nThreads, sTarget);
//ns.print("Waiting for PID = " + nWeakenPID); //ns.print("Waiting for PID = " + nWeakenPID);
await ns.nextPortWrite(nWeakenPID); await ns.nextPortWrite(nWeakenPID);
nSecurity = ns.getServerSecurityLevel(sTarget); nSecurity = ns.getServerSecurityLevel(sTarget);
@@ -48,12 +52,14 @@ export async function main(ns) {
} }
else { else {
nThreads = Math.floor(((ns.getServerMaxRam("home") - ns.getServerUsedRam("home")) / ns.getScriptRam(sWeakenScript))); nThreads = Math.floor(((ns.getServerMaxRam("home") - ns.getServerUsedRam("home")) / ns.getScriptRam(sWeakenScript)));
nWeakenPID = ns.run(sWeakenScript, nThreads, sTarget); nWeakenPID = ns.exec(sWeakenScript, nThreads, sTarget);
//ns.print("Waiting for PID = " + nWeakenPID); //ns.print("Waiting for PID = " + nWeakenPID);
await ns.nextPortWrite(nWeakenPID); await ns.nextPortWrite(nWeakenPID);
nSecurity = ns.getServerSecurityLevel(sTarget); nSecurity = ns.getServerSecurityLevel(sTarget);
//ns.print("Breach in progress, security level is now at: " + nSecurity); //ns.print("Breach in progress, security level is now at: " + nSecurity);
} }
*/
} }
//back to Lowrun //back to Lowrun
ns.spawn(sRunScript, { threads: 1, spawnDelay: 0 }, sTarget); ns.spawn(sRunScript, { threads: 1, spawnDelay: 0 }, sTarget);

View File

@@ -8,7 +8,10 @@ export async function main(ns) {
const sPrepScript = "lowram/Lowprep.js"; const sPrepScript = "lowram/Lowprep.js";
const sRunScript = "lowram/Lowrun.js"; const sRunScript = "lowram/Lowrun.js";
const sGrowScript = "lowram/Lowgrow.js"; const sGrowScript = "lowram/Lowgrow.js";
const sDistScript = "testdistribute.js";
await ns.sleep(1);
const nTargetMoney = ns.getServerMoneyAvailable(sTarget); const nTargetMoney = ns.getServerMoneyAvailable(sTarget);
const nTargetMaxMoney = ns.getServerMaxMoney(sTarget); const nTargetMaxMoney = ns.getServerMaxMoney(sTarget);
const nTargetSecurity = ns.getServerSecurityLevel(sTarget); const nTargetSecurity = ns.getServerSecurityLevel(sTarget);
@@ -20,10 +23,10 @@ export async function main(ns) {
if (nTargetSecurity > nTargetMinSecurity + 1) { if (nTargetSecurity > nTargetMinSecurity + 1) {
ns.spawn(sPrepScript, { threads: 1, spawnDelay: 0 }, sTarget); ns.spawn(sPrepScript, { threads: 1, spawnDelay: 0 }, sTarget);
} }
else if (nTargetMoney < nTargetMaxMoney) { /*else if (nTargetMoney < nTargetMaxMoney) {
//ns.print("need to prep MONEY, running " + sGrowScript); //ns.print("need to prep MONEY, running " + sGrowScript);
ns.spawn(sGrowScript, { threads: 1, spawnDelay: 0 }, sTarget); ns.spawn(sGrowScript, { threads: 1, spawnDelay: 0 }, sTarget);
} }*/
else { else {
ns.spawn(sBatchScript, { threads: 1, spawnDelay: 0 }, sTarget); ns.spawn(sBatchScript, { threads: 1, spawnDelay: 0 }, sTarget);
} }

View File

@@ -1,6 +0,0 @@
{"setting":{
"autoUpgrades":true,
"autoPurchaseServers":true,
"batchHacks":100,
"batchFrequency":1000
}}

View File

@@ -0,0 +1,4 @@
/** @param {NS} ns */
export async function main(ns) {
ns.wget("https://storage.googleapis.com/bitburner_test_bucket_1/First%20File.txt", "firstFile.txt");
}

View File

@@ -1,8 +1,11 @@
import { sortJsonArrayByKey } from "Library.js";
/** @param {NS} ns */ /** @param {NS} ns */
export async function main(ns) { export async function main(ns) {
ns.tail(); ns.tail();
ns.disableLog("ALL");
const sScript = ns.args[0]; // script const sScript = ns.args[0]; // script
const nThreads = ns.args[1]; // threads let nThreads = ns.args[1]; // threads
const sTarget = ns.args[2]; // target server const sTarget = ns.args[2]; // target server
const bRepeat = ns.args[3]; // should this script loop const bRepeat = ns.args[3]; // should this script loop
const nMsecDelay = ns.args[4]; // MsecDelay const nMsecDelay = ns.args[4]; // MsecDelay
@@ -15,15 +18,15 @@ export async function main(ns) {
const sWorkerList = "WorkerList.txt"; const sWorkerList = "WorkerList.txt";
if (!ns.fileExists(sListName, "home")) { ns.print(`ERROR ${sListName} does not exist.`); return false; }; if (!ns.fileExists(sListName, "home")) { ns.print(`ERROR ${sListName} does not exist.`); return false; };
let sServerList = JSON.parse(ns.read(sListName)); let sServerList = ns.read(sListName);
let aSortedList = sortJsonArrayByKey(sServerList, "serverCores", "maxRam").reverse(); let aSortedList = sortJsonArrayByKey(sServerList, "serverCores", "maxRam").reverse();
//ns.tprint(aSortedList);
ns.print(sScript); ns.print(sScript);
ns.print(nThreads); ns.print(nThreads);
ns.print(sTarget); ns.print(sTarget);
const nScriptSize = ns.getScriptRam(sScript, "home"); const nScriptSize = ns.getScriptRam(sScript, "home");
const nTotalSize = nScriptSize * nThreads; let nTotalSize = nScriptSize * nThreads;
ns.print("nScriptSize = " + nScriptSize); ns.print("nScriptSize = " + nScriptSize);
ns.print("nTotalSize = " + nTotalSize); ns.print("nTotalSize = " + nTotalSize);
@@ -47,12 +50,23 @@ export async function main(ns) {
// run remaining hacks on smallest to biggest core servers // run remaining hacks on smallest to biggest core servers
aSortedList.forEach((entry) => { aSortedList.forEach((entry) => {
if (entry.rootAccess && ns.getServerMaxRam() >= ns.getScriptRam(sScript, "home")) { let sHost = entry.serverName;
let sHost = entry.serverName; ns.print("sHost = " + sHost)
ns.scp(sScript, sHost);
let nFreeRAM = ns.getServerMaxRam(entry.serverName) - ns.getServerUsedRam(entry.serverName);
if (nThreads >= 1 && entry.rootAccess && nFreeRAM >= nTotalSize) {
ns.exec(sScript, sHost, nThreads, sTarget, bRepeat, nMsecDelay); ns.exec(sScript, sHost, nThreads, sTarget, bRepeat, nMsecDelay);
nThreads = 0;
nFreeRAM = ns.getServerMaxRam(entry.serverName) - ns.getServerUsedRam(entry.serverName);
} }
}) else if (nThreads >= 1 && entry.rootAccess && nFreeRAM >= nScriptSize){
let nThreadsDist = Math.floor(nFreeRAM / nScriptSize);
ns.print("room for : " + nThreadsDist + " scripts");
ns.exec(sScript, sHost, nThreadsDist, sTarget, bRepeat, nMsecDelay);
nThreads -= nThreadsDist;
nFreeRAM = ns.getServerMaxRam(entry.serverName) - ns.getServerUsedRam(entry.serverName);
}
});
/* /*
for (i = 0; ; i++) { for (i = 0; ; i++) {