clean up probe

This commit is contained in:
Simon Larsen
2023-05-03 17:47:47 +01:00
parent 4fe92a1096
commit 4aab4314c5
11 changed files with 15 additions and 1547 deletions

View File

@@ -125,7 +125,7 @@ const MonitorProbes: FunctionComponent<PageComponentProps> = (
icon: IconProp.Signal,
title: 'Probes',
description:
'Probes help you monitor this resource.',
'List of probes that help you monitor this resource.',
}}
noItemsMessage={'No probes found for this resource. However, you can add some probes to monitor this resource.'}
viewPageRoute={Navigation.getCurrentRoute()}
@@ -182,7 +182,7 @@ const MonitorProbes: FunctionComponent<PageComponentProps> = (
title: 'Last Monitored At',
type: FieldType.DateTime,
isFilterable: false,
noValueMessage: 'Never. Will be monitored soon.',
noValueMessage: 'Will be picked up by this probe soon.',
},
{

View File

@@ -1,67 +0,0 @@
import ApiService from '../Utils/apiService';
import pingfetch from '../Utils/pingFetch';
import logger from 'CommonServer/Utils/Logger';
/*
 * It collects all monitors then pings them one by one to store their response.
 * Checks whether the endpoint at the monitor's URL is up or down.
 * Incident creation/resolution is handled downstream (presumably by the ingestor, not here).
*/
export default {
    /**
     * Pings an API monitor's URL and reports the result to the ingestor.
     * Builds request headers/body from the monitor config, then keeps
     * re-pinging while the ingestor's response asks for a retry, capped
     * at 3 attempts total.
     *
     * @param monitor - monitor document; must have `type` and `data.url`.
     */
    ping: async ({ monitor }: $TSFixMe) => {
        if (monitor && monitor.type) {
            if (monitor.data.url) {
                const headers: $TSFixMe = await ApiService.headers(
                    monitor.headers,
                    monitor.bodyType
                );
                // Prefer raw text body when present, otherwise form data.
                const body: $TSFixMe = await ApiService.body(
                    monitor && monitor.text && monitor.text.length
                        ? monitor.text
                        : monitor.formData,
                    monitor && monitor.text && monitor.text.length
                        ? 'text'
                        : 'formData'
                );
                let retry: $TSFixMe = true;
                let retryCount: $TSFixMe = 0;
                // Fix: the old condition `retry || retryCount > 2` looped
                // forever when the ingestor kept requesting retries; cap the
                // loop at 3 attempts (retryCount 0, 1, 2).
                while (retry && retryCount < 3) {
                    const { res, resp, rawResp }: $TSFixMe = await pingfetch(
                        monitor.data.url,
                        monitor.method,
                        body,
                        headers
                    );
                    logger.info(
                        `Monitor ID ${monitor._id}: Start saving data to ingestor.`
                    );
                    const response: $TSFixMe = await ApiService.ping(
                        monitor._id,
                        {
                            monitor,
                            res,
                            resp,
                            rawResp,
                            type: monitor.type,
                            retryCount,
                        }
                    );
                    logger.info(
                        `Monitor ID ${monitor._id}: End saving data to ingestor.`
                    );
                    if (response && !response.retry) {
                        retry = false;
                    } else {
                        retryCount++;
                    }
                }
            }
        }
    },
};

View File

@@ -1,7 +0,0 @@
import ContainerService from '../Utils/containerService';
/**
 * Job wrapper around ContainerService: runs a scan for one
 * container-security document and resolves when the scan completes.
 */
const containerSecurityJob: $TSFixMe = {
    scan: async (containerSecurity: $TSFixMe) => {
        await ContainerService.scan(containerSecurity);
    },
};

export default containerSecurityJob;

View File

@@ -1,46 +0,0 @@
import moment from 'moment';
import ApiService from '../Utils/apiService';
/*
 * It collects all IOT device monitors, then checks the last time they were pinged.
 * If the difference is greater than 3 minutes (the code below uses `> 3`),
 * the device is treated as silent; incident handling happens downstream.
*/
export default {
    // Checks how long ago an IOT device monitor last pinged and reports its
    // perceived status to the ingestor so online/offline state can be flipped.
    ping: async (monitor: $TSFixMe) => {
        // Two clocks taken up-front: `newDate` (moment) is diffed against the
        // device's last ping; `resDate` (Date) times this handler's own work.
        const newDate: $TSFixMe = new moment();
        const resDate: $TSFixMe = new Date();
        if (monitor && monitor.type) {
            const d: $TSFixMe = new moment(monitor.lastPingTime);
            // Device has been silent for more than 3 minutes.
            if (newDate.diff(d, 'minutes') > 3) {
                const time: $TSFixMe = await ApiService.getMonitorTime(
                    monitor._id,
                    newDate
                );
                // Server still thinks it is online: ping to update its state.
                // NOTE(review): the online/offline pairing here looks inverted
                // at first glance — confirm ApiService.ping's semantics for
                // a payload without `res`.
                if (time.status === 'online') {
                    await ApiService.ping(monitor._id, {
                        monitor,
                        type: monitor.type,
                    });
                }
            } else {
                // Device pinged recently. `res` is the elapsed ms since this
                // handler started (effectively this function's own latency,
                // not the device's — presumably intended as a response time;
                // TODO confirm).
                const res: $TSFixMe = new Date().getTime() - resDate.getTime();
                const newTime: $TSFixMe = await ApiService.getMonitorTime(
                    monitor._id,
                    newDate
                );
                // Server thinks it is offline: ping to bring it back online.
                if (newTime.status === 'offline') {
                    await ApiService.ping(monitor._id, {
                        monitor,
                        res,
                        type: monitor.type,
                    });
                }
            }
        }
    },
};

View File

@@ -1,89 +0,0 @@
import logger from 'CommonServer/Utils/Logger';
import ProbeAPI from '../Utils/ProbeAPI';
import ApiMonitors from './ApiMonitors';
import UrlMonitors from './UrlMonitors';
import IPMonitors from './IpMonitors';
import ServerMonitors from './ServerMonitors';
import sleep from 'sleep-promise';
import IncomingHttpRequestMonitors from './incomingHttpRequestMonitors';
import KubernetesMonitors from './kubernetesMonitors';
import { ResourcesLimit } from '../Config';
const _this: $TSFixMe = {
    /**
     * Fetches up to `ResourcesLimit` monitors from the probe API and runs the
     * matching check for each monitor type, one monitor at a time.
     * Sleeps for 30 seconds when there is nothing to monitor.
     *
     * Fix: annotated `Promise<void>` (an async function cannot be `: void`
     * under strict TS) and awaited the Kubernetes branch like all others.
     */
    runJob: async function (): Promise<void> {
        logger.info(`Getting a list of ${ResourcesLimit.toString()} monitors`);
        let monitors: $TSFixMe = await ProbeAPI.get(
            'probe/monitors',
            ResourcesLimit.toNumber()
        );
        monitors = JSON.parse(monitors.data); // Parse the stringified data
        logger.info(`Number of Monitors fetched - ${monitors.length} monitors`);
        if (monitors.length === 0) {
            // There are no monitors to monitor. Sleep for 30 seconds and then wake up.
            logger.info('No monitors to monitor. Sleeping for 30 seconds.');
            await sleep(30 * 1000);
        }
        // Loop over the monitor
        for (const monitor of monitors) {
            logger.info(`Monitor ID ${monitor._id}: Currently monitoring`);
            if (monitor.type === 'api') {
                logger.info(
                    `Monitor ID ${monitor._id}: Start monitoring API monitor`
                );
                await ApiMonitors.ping({ monitor });
                logger.info(
                    `Monitor ID ${monitor._id}: End monitoring API monitor`
                );
            } else if (monitor.type === 'url') {
                logger.info(
                    `Monitor ID ${monitor._id}: Start monitoring URL monitor`
                );
                await UrlMonitors.ping({ monitor });
                logger.info(
                    `Monitor ID ${monitor._id}: End monitoring URL monitor`
                );
            } else if (monitor.type === 'ip') {
                logger.info(
                    `Monitor ID ${monitor._id}: Start monitoring IP monitor`
                );
                await IPMonitors.ping({ monitor });
                logger.info(
                    `Monitor ID ${monitor._id}: End monitoring IP monitor`
                );
            } else if (
                monitor.type === 'server-monitor' &&
                monitor.agentlessConfig
            ) {
                logger.info(
                    `Monitor ID ${monitor._id}: Start monitoring Server monitor`
                );
                await ServerMonitors.run({ monitor });
                logger.info(
                    `Monitor ID ${monitor._id}: End monitoring Server monitor`
                );
            } else if (monitor.type === 'incomingHttpRequest') {
                logger.info(
                    `Monitor ID ${monitor._id}: Start monitoring Incoming HTTP Request monitor`
                );
                await IncomingHttpRequestMonitors.run({ monitor });
                logger.info(
                    `Monitor ID ${monitor._id}: End monitoring Incoming HTTP Request monitor`
                );
            } else if (monitor.type === 'kubernetes') {
                logger.info(
                    `Monitor ID ${monitor._id}: Start monitoring Kubernetes monitor`
                );
                // Fix: await like every other monitor type — previously the
                // "End monitoring" log fired before the run had completed.
                await KubernetesMonitors.run({ monitor });
                logger.info(
                    `Monitor ID ${monitor._id}: End monitoring Kubernetes monitor`
                );
            }
        }
    },
};
export default _this;

View File

@@ -1,73 +0,0 @@
import ApiService from '../Utils/apiService';
import ping from 'ping';
/*
* It collects all monitors then ping them one by one to store their response
* Checks if the IP Address of the IP monitor is up or down
* Creates incident if a IP Address is down and resolves it when they come back up
*/
export default {
    /**
     * Pings an IP monitor's address via ICMP and reports the result to the
     * ingestor. Keeps re-pinging while the ingestor's response asks for a
     * retry, capped at 3 attempts total.
     *
     * @param monitor - monitor document; must have `type` and `data.IPAddress`.
     */
    ping: async ({ monitor }: $TSFixMe) => {
        if (monitor && monitor.type) {
            if (monitor.data.IPAddress) {
                let retry: $TSFixMe = true;
                let retryCount: $TSFixMe = 0;
                // Fix: the old condition `retry || retryCount > 2` spun
                // forever when the ingestor kept requesting retries; cap the
                // loop at 3 attempts (retryCount 0, 1, 2).
                while (retry && retryCount < 3) {
                    const { res, resp, rawResp }: $TSFixMe = await pingfetch(
                        monitor.data.IPAddress
                    );
                    const response: $TSFixMe = await ApiService.ping(
                        monitor._id,
                        {
                            monitor,
                            res,
                            resp,
                            rawResp,
                            type: monitor.type,
                            retryCount,
                        }
                    );
                    if (response && !response.retry) {
                        retry = false;
                    } else {
                        retryCount++;
                    }
                }
            }
        }
    },
};
/**
 * ICMP-pings the given address (120s timeout, passing `-i 2` to the system
 * ping) and maps the outcome onto an HTTP-ish shape: status 200 when alive,
 * 408 when dead or on error.
 *
 * Fix: the async arrow was annotated `: void` although it returns an object —
 * corrected to `Promise<$TSFixMe>`.
 *
 * @returns `res` — elapsed milliseconds for the probe;
 *          `resp` — `{ status, body }` summary (body carries the error on failure);
 *          `rawResp` — same shape as `resp` on success, stays null on error.
 */
const pingfetch: Function = async (IPAddress: $TSFixMe): Promise<$TSFixMe> => {
    const now: $TSFixMe = new Date().getTime();
    let resp: $TSFixMe = null;
    let rawResp: $TSFixMe = null;
    let res: $TSFixMe = null;
    try {
        const response: $TSFixMe = await ping.promise.probe(IPAddress, {
            timeout: 120,
            extra: ['-i', '2'],
        });
        const isAlive: $TSFixMe = response ? response.alive : false;
        res = new Date().getTime() - now;
        resp = {
            status: isAlive ? 200 : 408,
            body: null,
        };
        rawResp = {
            body: null,
            status: isAlive ? 200 : 408,
        };
    } catch (error) {
        // On probe failure report 408 with the error; rawResp stays null.
        res = new Date().getTime() - now;
        resp = { status: 408, body: error };
    }
    return { res, resp, rawResp };
};

View File

@@ -1,839 +0,0 @@
import { spawn } from 'child_process';
import fs from 'fs';
import Path from 'path';
import fetch from 'node-fetch-commonjs';
import { v4 as uuidv4 } from 'uuid';
import ApiService from '../Utils/apiService';
import { serverUrl } from '../Config';
import { deleteFile } from '../Utils/fsHandlers';
export default {
run: async function ({ monitor }): void {
if (
monitor &&
monitor.type &&
monitor.type === 'kubernetes' &&
monitor.kubernetesConfig
) {
const configurationFile: $TSFixMe = monitor.kubernetesConfig;
const updatedConfigName: string = `${uuidv4()}${configurationFile}`;
const configPath: $TSFixMe = Path.resolve(
process.cwd(),
updatedConfigName
);
const namespace: $TSFixMe =
monitor.kubernetesNamespace || 'default';
await fetch(`${serverUrl}/file/${configurationFile}`).then(
(res: $TSFixMe) => {
const dest: $TSFixMe = fs.createWriteStream(configPath);
res.body.pipe(dest);
// At this point, writing to the specified file is complete
dest.on('finish', async () => {
if (fs.existsSync(configPath)) {
const [
podOutput,
jobOutput,
serviceOutput,
deploymentOutput,
statefulsetOutput,
] = await Promise.all([
loadPodOutput(configPath, namespace),
loadJobOutput(configPath, namespace),
loadServiceOutput(configPath, namespace),
loadDeploymentOutput(configPath, namespace),
loadStatefulsetOutput(configPath, namespace),
]);
if (
podOutput &&
jobOutput &&
deploymentOutput &&
statefulsetOutput
) {
// Handle pod output
const healthyPods: $TSFixMe = [],
healthyPodData: $TSFixMe = [],
unhealthyPods: $TSFixMe = [],
unhealthyPodData: $TSFixMe = [],
allPods: $TSFixMe = [],
allPodData: $TSFixMe = [];
let runningPods: $TSFixMe = 0,
completedPods: $TSFixMe = 0,
failedPods: $TSFixMe = 0;
podOutput.items.forEach((item: $TSFixMe) => {
/**
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podstatus-v1-core
*/
if (
item.status.phase !== 'Running' &&
item.status.phase !== 'Succeeded'
) {
unhealthyPods.push({
podName: item.metadata.name,
podNamespace:
item.metadata.namespace,
podStatus: item.status.phase,
podCreationTimestamp:
item.metadata.creationTimestamp,
podRestart:
item.status &&
item.status.containerStatuses &&
item.status.containerStatuses[0]
? item.status
.containerStatuses[0]
.restartCount
: 0,
podResourceVersion:
item.metadata.resourceVersion,
podUid: item.metadata.uid,
podSelfLink: item.metadata.selfLink,
podConditions:
item.status.conditions,
podContainerStatuses:
item.status.containerStatuses,
podContainers: item.spec.containers,
});
unhealthyPodData.push({
podName: item.metadata.name,
podNamespace:
item.metadata.namespace,
podStatus: item.status.phase,
podCreationTimestamp:
item.metadata.creationTimestamp,
podRestart:
item.status &&
item.status.containerStatuses &&
item.status.containerStatuses[0]
? item.status
.containerStatuses[0]
.restartCount
: 0,
});
failedPods += 1;
} else {
healthyPods.push({
podName: item.metadata.name,
podNamespace:
item.metadata.namespace,
podStatus: item.status.phase,
podCreationTimestamp:
item.metadata.creationTimestamp,
podRestart:
item.status &&
item.status.containerStatuses &&
item.status.containerStatuses[0]
? item.status
.containerStatuses[0]
.restartCount
: 0,
podResourceVersion:
item.metadata.resourceVersion,
podUid: item.metadata.uid,
podSelfLink: item.metadata.selfLink,
podConditions:
item.status.conditions,
podContainerStatuses:
item.status.containerStatuses,
podContainers: item.spec.containers,
});
healthyPodData.push({
podName: item.metadata.name,
podNamespace:
item.metadata.namespace,
podStatus: item.status.phase,
podCreationTimestamp:
item.metadata.creationTimestamp,
podRestart:
item.status &&
item.status.containerStatuses &&
item.status.containerStatuses[0]
? item.status
.containerStatuses[0]
.restartCount
: 0,
});
if (item.status.phase === 'Running') {
++runningPods;
}
if (item.status.phase === 'Succeeded') {
++completedPods;
}
}
allPods.push({
podName: item.metadata.name,
podNamespace: item.metadata.namespace,
podStatus: item.status.phase,
podCreationTimestamp:
item.metadata.creationTimestamp,
podRestart:
item.status &&
item.status.containerStatuses &&
item.status.containerStatuses[0]
? item.status
.containerStatuses[0]
.restartCount
: 0,
podResourceVersion:
item.metadata.resourceVersion,
podUid: item.metadata.uid,
podSelfLink: item.metadata.selfLink,
podConditions: item.status.conditions,
podContainerStatuses:
item.status.containerStatuses,
podContainers: item.spec.containers,
});
allPodData.push({
podName: item.metadata.name,
podNamespace: item.metadata.namespace,
podStatus: item.status.phase,
podCreationTimestamp:
item.metadata.creationTimestamp,
podRestart:
item.status &&
item.status.containerStatuses &&
item.status.containerStatuses[0]
? item.status
.containerStatuses[0]
.restartCount
: 0,
});
});
const podData: $TSFixMe = {
podStat: {
healthy: healthyPods.length,
unhealthy: unhealthyPods.length,
runningPods,
completedPods,
failedPods,
totalPods: podOutput.items.length,
},
healthyPods,
unhealthyPods,
allPods,
healthyPodData,
unhealthyPodData,
allPodData,
};
// Handle job output
const runningJobs: $TSFixMe = [],
succeededJobs: $TSFixMe = [],
failedJobs: $TSFixMe = [],
runningJobData: $TSFixMe = [],
succeededJobData: $TSFixMe = [],
failedJobData: $TSFixMe = [];
jobOutput.items.forEach((item: $TSFixMe) => {
/**
* https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#job-v1-batch
*/
if (item.status && item.status.active > 0) {
runningJobs.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'running',
jobCreationTimestamp:
item.metadata.creationTimestamp,
jobResourceVersion:
item.metadata.resourceVersion,
jobUid: item.metadata.uid,
jobSelfLink: item.metadata.selfLink,
jobConditions:
item.status.conditions,
});
runningJobData.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'running',
jobCreationTimestamp:
item.metadata.creationTimestamp,
});
} else if (
item.status &&
item.status.succeeded > 0
) {
succeededJobs.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'succeeded',
jobCreationTimestamp:
item.metadata.creationTimestamp,
jobResourceVersion:
item.metadata.resourceVersion,
jobUid: item.metadata.uid,
jobSelfLink: item.metadata.selfLink,
jobConditions:
item.status.conditions,
});
succeededJobData.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'succeeded',
jobCreationTimestamp:
item.metadata.creationTimestamp,
});
} else if (
item.status &&
item.status.failed > 0
) {
failedJobs.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'failed',
jobCreationTimestamp:
item.metadata.creationTimestamp,
jobResourceVersion:
item.metadata.resourceVersion,
jobUid: item.metadata.uid,
jobSelfLink: item.metadata.selfLink,
jobConditions:
item.status.conditions,
});
failedJobData.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'failed',
jobCreationTimestamp:
item.metadata.creationTimestamp,
});
} else {
failedJobs.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'failed',
jobCreationTimestamp:
item.metadata.creationTimestamp,
jobResourceVersion:
item.metadata.resourceVersion,
jobUid: item.metadata.uid,
jobSelfLink: item.metadata.selfLink,
jobConditions:
item.status.conditions,
});
failedJobData.push({
jobName: item.metadata.name,
jobNamespace:
item.metadata.namespace,
jobStatus: 'failed',
jobCreationTimestamp:
item.metadata.creationTimestamp,
});
}
});
const jobData: $TSFixMe = {
jobStat: {
runningJobs: runningJobs.length,
succeededJobs: succeededJobs.length,
failedJobs: failedJobs.length,
totalJobs:
runningJobs.length +
succeededJobs.length +
failedJobs.length,
healthy:
runningJobs.length +
succeededJobs.length,
unhealthy: failedJobs.length,
},
runningJobs,
succeededJobs,
failedJobs,
allJobs: [
...runningJobs,
...succeededJobs,
...failedJobs,
],
allJobData: [
...runningJobData,
...succeededJobData,
...failedJobData,
],
healthyJobs: [
...runningJobs,
...succeededJobs,
],
healthyJobData: [
...runningJobData,
...succeededJobData,
],
unhealthyJobs: [...failedJobs],
unhealthyJobData: [...failedJobData],
};
// Handle services output
const serviceData: $TSFixMe = {
runningServices: serviceOutput.items.length,
};
// Handle deployment output
let desiredDeployment: $TSFixMe = 0,
readyDeployment: $TSFixMe = 0;
const unhealthyDeployments: $TSFixMe = [],
healthyDeployments: $TSFixMe = [],
allDeployments: $TSFixMe = [],
unhealthyDeploymentData: $TSFixMe = [],
healthyDeploymentData: $TSFixMe = [],
allDeploymentData: $TSFixMe = [];
deploymentOutput.items.forEach(
(item: $TSFixMe) => {
if (item.status.readyReplicas) {
readyDeployment +=
item.status.readyReplicas;
} else {
readyDeployment += 0;
}
desiredDeployment +=
item.status.replicas;
if (
item.status.readyReplicas !==
item.status.replicas
) {
unhealthyDeployments.push({
deploymentName:
item.metadata.name,
deploymentNamespace:
item.metadata.namespace,
deploymentCreationTimestamp:
item.metadata
.creationTimestamp,
readyDeployment:
item.status.readyReplicas ||
0,
desiredDeployment:
item.status.replicas,
deploymentResourceVersion:
item.metadata
.resourceVersion,
deploymentUid:
item.metadata.uid,
deploymentSelfLink:
item.metadata.selfLink,
deploymentConditions:
item.status.conditions,
});
unhealthyDeploymentData.push({
deploymentName:
item.metadata.name,
deploymentNamespace:
item.metadata.namespace,
deploymentCreationTimestamp:
item.metadata
.creationTimestamp,
readyDeployment:
item.status.readyReplicas ||
0,
desiredDeployment:
item.status.replicas,
});
} else {
healthyDeployments.push({
deploymentName:
item.metadata.name,
deploymentNamespace:
item.metadata.namespace,
deploymentCreationTimestamp:
item.metadata
.creationTimestamp,
readyDeployment:
item.status.readyReplicas,
desiredDeployment:
item.status.replicas,
deploymentResourceVersion:
item.metadata
.resourceVersion,
deploymentUid:
item.metadata.uid,
deploymentSelfLink:
item.metadata.selfLink,
deploymentConditions:
item.status.conditions,
});
healthyDeploymentData.push({
deploymentName:
item.metadata.name,
deploymentNamespace:
item.metadata.namespace,
deploymentCreationTimestamp:
item.metadata
.creationTimestamp,
readyDeployment:
item.status.readyReplicas,
desiredDeployment:
item.status.replicas,
});
}
allDeployments.push({
deploymentName: item.metadata.name,
deploymentNamespace:
item.metadata.namespace,
deploymentCreationTimestamp:
item.metadata.creationTimestamp,
readyDeployment:
item.status.readyReplicas || 0,
desiredDeployment:
item.status.replicas,
deploymentResourceVersion:
item.metadata.resourceVersion,
deploymentUid: item.metadata.uid,
deploymentSelfLink:
item.metadata.selfLink,
deploymentConditions:
item.status.conditions,
});
allDeploymentData.push({
deploymentName: item.metadata.name,
deploymentNamespace:
item.metadata.namespace,
deploymentCreationTimestamp:
item.metadata.creationTimestamp,
readyDeployment:
item.status.readyReplicas || 0,
desiredDeployment:
item.status.replicas,
});
}
);
const deploymentData: $TSFixMe = {
desiredDeployment,
readyDeployment,
healthyDeployments,
unhealthyDeployments,
allDeployments,
healthy: healthyDeployments.length,
unhealthy: unhealthyDeployments.length,
healthyDeploymentData,
unhealthyDeploymentData,
allDeploymentData,
};
// Handle statefulset output
let desiredStatefulsets: $TSFixMe = 0,
readyStatefulsets: $TSFixMe = 0;
const healthyStatefulsets: $TSFixMe = [],
unhealthyStatefulsets: $TSFixMe = [],
allStatefulset: $TSFixMe = [],
healthyStatefulsetData: $TSFixMe = [],
unhealthyStatefulsetData: $TSFixMe = [],
allStatefulsetData: $TSFixMe = [];
statefulsetOutput.items.forEach(
(item: $TSFixMe) => {
if (item.status.readyReplicas) {
readyStatefulsets +=
item.status.readyReplicas;
} else {
readyStatefulsets += 0;
}
desiredStatefulsets +=
item.status.replicas;
if (
item.status.readyReplicas !==
item.status.replicas
) {
unhealthyStatefulsets.push({
statefulsetName:
item.metadata.name,
statefulsetNamespace:
item.metadata.namespace,
statefulsetCreationTimestamp:
item.metadata
.creationTimestamp,
readyStatefulsets:
item.status.readyReplicas ||
0,
desiredStatefulsets:
item.status.replicas,
statefulsetResourceVersion:
item.metadata
.resourceVersion,
statefulsetUid:
item.metadata.uid,
statefulsetSelfLink:
item.metadata.selfLink,
});
unhealthyStatefulsetData.push({
statefulsetName:
item.metadata.name,
statefulsetNamespace:
item.metadata.namespace,
statefulsetCreationTimestamp:
item.metadata
.creationTimestamp,
readyStatefulsets:
item.status.readyReplicas ||
0,
desiredStatefulsets:
item.status.replicas,
});
} else {
healthyStatefulsets.push({
statefulsetName:
item.metadata.name,
statefulsetNamespace:
item.metadata.namespace,
statefulsetCreationTimestamp:
item.metadata
.creationTimestamp,
readyStatefulsets:
item.status.readyReplicas,
desiredStatefulsets:
item.status.replicas,
statefulsetResourceVersion:
item.metadata
.resourceVersion,
statefulsetUid:
item.metadata.uid,
statefulsetSelfLink:
item.metadata.selfLink,
});
healthyStatefulsetData.push({
statefulsetName:
item.metadata.name,
statefulsetNamespace:
item.metadata.namespace,
statefulsetCreationTimestamp:
item.metadata
.creationTimestamp,
readyStatefulsets:
item.status.readyReplicas,
desiredStatefulsets:
item.status.replicas,
});
}
allStatefulset.push({
statefulsetName: item.metadata.name,
statefulsetNamespace:
item.metadata.namespace,
statefulsetCreationTimestamp:
item.metadata.creationTimestamp,
readyStatefulsets:
item.status.readyReplicas || 0,
desiredStatefulsets:
item.status.replicas,
statefulsetResourceVersion:
item.metadata.resourceVersion,
statefulsetUid: item.metadata.uid,
statefulsetSelfLink:
item.metadata.selfLink,
});
allStatefulsetData.push({
statefulsetName: item.metadata.name,
statefulsetNamespace:
item.metadata.namespace,
statefulsetCreationTimestamp:
item.metadata.creationTimestamp,
readyStatefulsets:
item.status.readyReplicas || 0,
desiredStatefulsets:
item.status.replicas,
});
}
);
const statefulsetData: $TSFixMe = {
readyStatefulsets,
desiredStatefulsets,
healthyStatefulsets,
unhealthyStatefulsets,
allStatefulset,
healthy: healthyStatefulsets.length,
unhealthy: unhealthyStatefulsets.length,
healthyStatefulsetData,
unhealthyStatefulsetData,
allStatefulsetData,
};
const data: $TSFixMe = {
podData,
jobData,
serviceData,
deploymentData,
statefulsetData,
};
await ApiService.ping(monitor._id, {
monitor,
kubernetesData: data,
type: monitor.type,
});
// Remove the config file
await deleteFile(configPath);
}
}
// Remove the config file
await deleteFile(configPath);
});
dest.on('error', async (error: Error) => {
await deleteFile(configPath);
throw error;
});
}
);
}
},
};
/**
 * Runs `kubectl get pods -o json` against the given kubeconfig and namespace.
 * Resolves with the parsed JSON output, or '' when kubectl produced no stdout
 * (stderr is ignored, so failures surface as empty output). Never rejects.
 *
 * Fix: return type was annotated `: void` although a Promise is returned.
 */
function loadPodOutput(
    configPath: $TSFixMe,
    namespace: $TSFixMe
): Promise<$TSFixMe> {
    return new Promise((resolve: $TSFixMe) => {
        let podOutput: $TSFixMe = '';
        const podCommand: string = `kubectl get pods -o json --kubeconfig ${configPath} --namespace ${namespace}`;
        const podCommandOutput: $TSFixMe = spawn(podCommand, {
            cwd: process.cwd(),
            shell: true,
        });
        podCommandOutput.stdout.on('data', (data: $TSFixMe) => {
            podOutput += data.toString();
        });
        podCommandOutput.on('close', () => {
            // NOTE(review): JSON.parse throws inside this handler if kubectl
            // emitted non-JSON — consider a try/catch resolving '' instead.
            if (podOutput) {
                podOutput = JSON.parse(podOutput);
            }
            resolve(podOutput);
        });
    });
}
/**
 * Runs `kubectl get jobs -o json` against the given kubeconfig and namespace.
 * Resolves with the parsed JSON output, or '' when kubectl produced no stdout
 * (stderr is ignored, so failures surface as empty output). Never rejects.
 *
 * Fix: return type was annotated `: void` although a Promise is returned.
 */
function loadJobOutput(
    configPath: $TSFixMe,
    namespace: $TSFixMe
): Promise<$TSFixMe> {
    return new Promise((resolve: $TSFixMe) => {
        let jobOutput: $TSFixMe = '';
        const jobCommand: string = `kubectl get jobs -o json --kubeconfig ${configPath} --namespace ${namespace}`;
        const jobCommandOutput: $TSFixMe = spawn(jobCommand, {
            cwd: process.cwd(),
            shell: true,
        });
        jobCommandOutput.stdout.on('data', (data: $TSFixMe) => {
            jobOutput += data.toString();
        });
        jobCommandOutput.on('close', () => {
            // NOTE(review): JSON.parse throws inside this handler if kubectl
            // emitted non-JSON — consider a try/catch resolving '' instead.
            if (jobOutput) {
                jobOutput = JSON.parse(jobOutput);
            }
            resolve(jobOutput);
        });
    });
}
/**
 * Runs `kubectl get services -o json` against the given kubeconfig and
 * namespace. Resolves with the parsed JSON output, or '' when kubectl
 * produced no stdout (stderr is ignored). Never rejects.
 *
 * Fix: return type was annotated `: void` although a Promise is returned.
 */
function loadServiceOutput(
    configPath: $TSFixMe,
    namespace: $TSFixMe
): Promise<$TSFixMe> {
    return new Promise((resolve: $TSFixMe) => {
        let serviceOutput: $TSFixMe = '';
        const serviceCommand: string = `kubectl get services -o json --kubeconfig ${configPath} --namespace ${namespace}`;
        const serviceCommandOutput: $TSFixMe = spawn(serviceCommand, {
            cwd: process.cwd(),
            shell: true,
        });
        serviceCommandOutput.stdout.on('data', (data: $TSFixMe) => {
            serviceOutput += data.toString();
        });
        serviceCommandOutput.on('close', () => {
            // NOTE(review): JSON.parse throws inside this handler if kubectl
            // emitted non-JSON — consider a try/catch resolving '' instead.
            if (serviceOutput) {
                serviceOutput = JSON.parse(serviceOutput);
            }
            resolve(serviceOutput);
        });
    });
}
/**
 * Runs `kubectl get deployments -o json` against the given kubeconfig and
 * namespace. Resolves with the parsed JSON output, or '' when kubectl
 * produced no stdout (stderr is ignored). Never rejects.
 *
 * Fix: return type was annotated `: void` although a Promise is returned.
 */
function loadDeploymentOutput(
    configPath: $TSFixMe,
    namespace: $TSFixMe
): Promise<$TSFixMe> {
    return new Promise((resolve: $TSFixMe) => {
        let deploymentOutput: $TSFixMe = '';
        const deploymentCommand: string = `kubectl get deployments -o json --kubeconfig ${configPath} --namespace ${namespace}`;
        const deploymentCommandOutput: $TSFixMe = spawn(deploymentCommand, {
            cwd: process.cwd(),
            shell: true,
        });
        deploymentCommandOutput.stdout.on('data', (data: $TSFixMe) => {
            deploymentOutput += data.toString();
        });
        deploymentCommandOutput.on('close', () => {
            // NOTE(review): JSON.parse throws inside this handler if kubectl
            // emitted non-JSON — consider a try/catch resolving '' instead.
            if (deploymentOutput) {
                deploymentOutput = JSON.parse(deploymentOutput);
            }
            resolve(deploymentOutput);
        });
    });
}
/**
 * Runs `kubectl get statefulsets -o json` against the given kubeconfig and
 * namespace. Resolves with the parsed JSON output, or '' when kubectl
 * produced no stdout (stderr is ignored). Never rejects.
 *
 * Fix: return type was annotated `: void` although a Promise is returned.
 */
function loadStatefulsetOutput(
    configPath: $TSFixMe,
    namespace: $TSFixMe
): Promise<$TSFixMe> {
    return new Promise((resolve: $TSFixMe) => {
        let statefulsetOutput: $TSFixMe = '';
        const statefulsetCommand: string = `kubectl get statefulsets -o json --kubeconfig ${configPath} --namespace ${namespace}`;
        const statefulsetCommandOutput: $TSFixMe = spawn(statefulsetCommand, {
            cwd: process.cwd(),
            shell: true,
        });
        statefulsetCommandOutput.stdout.on('data', (data: $TSFixMe) => {
            statefulsetOutput += data.toString();
        });
        statefulsetCommandOutput.on('close', () => {
            // NOTE(review): JSON.parse throws inside this handler if kubectl
            // emitted non-JSON — consider a try/catch resolving '' instead.
            if (statefulsetOutput) {
                statefulsetOutput = JSON.parse(statefulsetOutput);
            }
            resolve(statefulsetOutput);
        });
    });
}

View File

@@ -1,364 +0,0 @@
import ApiService from '../Utils/apiService';
import fs from 'fs';
import { NodeSSH } from 'node-ssh';
import fetch from 'node-fetch-commonjs';
import { COMMAND, serverUrl } from '../Config';
export default {
    /**
     * Agentless server monitoring: connects over SSH (password or downloaded
     * identity file), detects the remote OS, gathers resource metrics via
     * `execCommands` and reports them to the ingestor.
     *
     * Fixes over the original: the SSH session is now awaited (it was
     * fire-and-forget, so `run` resolved before monitoring finished even
     * though callers `await` it), and the async arrow is typed
     * `Promise<void>` instead of the invalid `: void`.
     */
    run: async ({ monitor }: $TSFixMe): Promise<void> => {
        if (
            !(
                monitor &&
                monitor.type &&
                monitor.agentlessConfig &&
                typeof monitor.agentlessConfig === 'object'
            )
        ) {
            return;
        }
        const {
            host,
            port,
            username,
            authentication,
            password,
            identityFile,
        } = monitor.agentlessConfig;
        const ssh: $TSFixMe = new NodeSSH();
        const config: $TSFixMe = {
            host,
            port,
            username,
        };
        if (authentication === 'password') {
            config.password = password;
        } else {
            // Download the private key from the API server, read it into the
            // config, then remove the local copy.
            await fetch(`${serverUrl}/file/${identityFile}`).then(
                (res: $TSFixMe) => {
                    return new Promise(
                        (resolve: Function, reject: Function) => {
                            const dest: $TSFixMe = fs.createWriteStream(
                                `./${identityFile}`
                            );
                            res.body.pipe(dest);
                            res.body.on('end', () => {
                                // Give the write stream a moment to flush.
                                // NOTE(review): waiting on dest 'finish'
                                // would be more reliable than a 1s timer.
                                setTimeout(() => {
                                    config.privateKey = fs.readFileSync(
                                        `./${identityFile}`,
                                        'utf8'
                                    );
                                    resolve();
                                }, 1000);
                            });
                            dest.on('error', reject);
                        }
                    );
                }
            );
            fs.unlinkSync(`./${identityFile}`);
        }
        await ssh.connect(config).then(async (): Promise<void> => {
            let os: $TSFixMe;
            try {
                const { stdout: osLine, stderr } = await ssh.execCommand(
                    'uname -a'
                );
                if (stderr) {
                    throw stderr;
                }
                os = osLine.split(' ')[0];
            } catch (e) {
                // `uname` failed — assume Windows and read the OS via wmic.
                const { stdout: osLine } = await ssh.execCommand(
                    'wmic os get name'
                );
                os = osLine.split(' ')[1];
            }
            const serverData: $TSFixMe = await execCommands(ssh, os);
            ssh.dispose();
            await ApiService.ping(monitor._id, {
                monitor,
                serverData,
                type: monitor.type,
            });
        });
    },
};
const execCommands: Function = async (exec: $TSFixMe, os: $TSFixMe): void => {
const isSSH: $TSFixMe = exec instanceof NodeSSH;
// TODO: complete commands and make platform specific
let cpuLoad: $TSFixMe,
avgCpuLoad: $TSFixMe,
cpuCores: $TSFixMe,
memoryUsed: $TSFixMe,
totalMemory: $TSFixMe,
swapUsed: $TSFixMe,
storageUsed: $TSFixMe,
totalStorage: $TSFixMe,
storageUsage: $TSFixMe,
mainTemp: $TSFixMe,
maxTemp: $TSFixMe;
if (os === 'Linux') {
const { stdout: load } = await (isSSH
? exec.execCommand(COMMAND.linux.load)
: exec(COMMAND.linux.load));
const { stdout: cpu } = await (isSSH
? exec.execCommand(COMMAND.linux.cpu)
: exec(COMMAND.linux.cpu));
const { stdout: mem } = await (isSSH
? exec.execCommand(COMMAND.linux.mem)
: exec(COMMAND.linux.mem));
const { stdout: disk } = await (isSSH
? exec.execCommand(COMMAND.linux.disk)
: exec(COMMAND.linux.disk));
const { stdout: temp } = await (isSSH
? exec.execCommand(COMMAND.linux.temp)
: exec(COMMAND.linux.temp));
const loadLines: $TSFixMe = load
.replace(/\t|:|,|-/gi, '')
.trim()
.split('\n')
.map((line: $TSFixMe) => {
const words: $TSFixMe = line
.replace(/\s+/g, ' ')
.trim()
.split(' ');
return words;
});
const cpuLines: $TSFixMe = cpu
.replace(/\t|:/gi, '')
.trim()
.split('\n')
.map((line: $TSFixMe) => {
return line.replace(/\s+/g, ' ').trim();
});
const memLines: $TSFixMe = mem
.replace(/\t|:/gi, '')
.trim()
.split('\n')
.map((line: $TSFixMe) => {
const words: $TSFixMe = line
.replace(/\s+/g, ' ')
.trim()
.split(' ');
return words[words.length - 2];
});
const diskLines: $TSFixMe = disk
.replace(/\t|:|M|G|%/gi, '')
.trim()
.split('\n')
.map((line: $TSFixMe) => {
const words: $TSFixMe = line
.replace(/\s+/g, ' ')
.trim()
.split(' ');
return {
storageUsed: words[2],
totalStorage: words[1],
storageUsage: words[4],
};
})
.reduce((disks: $TSFixMe, disk: $TSFixMe) => {
return {
storageUsed: disks.storageUsed + disk.storageUsed,
totalStorage: disks.totalStorage + disk.totalStorage,
storageUsage: disks.storageUsage + disk.storageUsage,
};
});
const tempLines: $TSFixMe = temp
.replace(/\t|:|\+|°|C/gi, '')
.replace(/\s+/g, ' ')
.trim()
.split(' ');
cpuLoad = loadLines[3][1];
avgCpuLoad = loadLines[2][10];
cpuCores = cpuLines.length / 2;
memoryUsed = (parseFloat(memLines[0]) - parseFloat(memLines[1])) * 1024;
totalMemory = memLines[0] * 1024;
swapUsed = (parseFloat(memLines[4]) - parseFloat(memLines[5])) * 1024;
storageUsed = diskLines.storageUsed * 1024 * 1024 * 1024;
totalStorage = diskLines.totalStorage * 1024 * 1024 * 1024;
storageUsage = diskLines.storageUsage;
mainTemp = tempLines[1];
maxTemp = tempLines[1];
} else if (os === 'Darwin') {
const { stdout: load } = await (isSSH
? exec.execCommand(COMMAND.darwin.load)
: exec(COMMAND.darwin.load));
const { stdout: cpu } = await (isSSH
? exec.execCommand(COMMAND.darwin.cpu)
: exec(COMMAND.darwin.cpu));
const { stdout: usedMem } = await (isSSH
? exec.execCommand(COMMAND.darwin.mem.used)
: exec(COMMAND.darwin.mem.used));
const { stdout: totalMem } = await (isSSH
? exec.execCommand(COMMAND.darwin.mem.total)
: exec(COMMAND.darwin.mem.total));
const { stdout: swapMem } = await (isSSH
? exec.execCommand(COMMAND.darwin.mem.swap)
: exec(COMMAND.darwin.mem.swap));
const { stdout: disk } = await (isSSH
? exec.execCommand(COMMAND.darwin.disk)
: exec(COMMAND.darwin.disk));
const { stdout: temp } = await (isSSH
? exec.execCommand(COMMAND.darwin.temp)
: exec(COMMAND.darwin.temp));
const loadLines: $TSFixMe = load
.replace(/\t|:|,|-|%/gi, '')
.trim()
.split('\n')
.map((line: $TSFixMe) => {
const words: $TSFixMe = line
.replace(/\s+/g, ' ')
.trim()
.split(' ');
return words;
});
const memLines: $TSFixMe = usedMem
.replace(/\t|:|M|G|\(|\)/gi, '')
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const swapLines: $TSFixMe = swapMem
.replace(/\t|:|M|G|\(|\)|=/gi, '')
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const diskLines: $TSFixMe = disk
.replace(/\t|:|Mi|Gi|%/gi, '')
.trim()
.split('\n')
.map((line: $TSFixMe) => {
const words: $TSFixMe = line
.replace(/\s+/g, ' ')
.trim()
.split(' ');
return {
storageUsed: words[2],
totalStorage: words[1],
storageUsage: words[4],
};
})
.reduce((disks: $TSFixMe, disk: $TSFixMe) => {
return {
storageUsed: disks.storageUsed + disk.storageUsed,
totalStorage: disks.totalStorage + disk.totalStorage,
storageUsage: disks.storageUsage + disk.storageUsage,
};
});
cpuLoad = loadLines[1][2];
avgCpuLoad = loadLines[0][3];
cpuCores = cpu.replace('\n', '');
memoryUsed =
(parseFloat(memLines[1]) - parseFloat(memLines[3])) * 1024 * 1024;
totalMemory = totalMem.replace('\n', '');
swapUsed = swapLines[3] * 1024 * 1024;
storageUsed = diskLines.storageUsed * 1024 * 1024 * 1024;
totalStorage = diskLines.totalStorage * 1024 * 1024 * 1024;
storageUsage = diskLines.storageUsage;
mainTemp = temp.replace('\n', '');
maxTemp = temp.replace('\n', '');
} else if (os === 'Windows') {
const { stdout: load } = await (isSSH
? exec.execCommand(COMMAND.win.load)
: exec(COMMAND.win.load));
const { stdout: cpu } = await (isSSH
? exec.execCommand(COMMAND.win.cpu)
: exec(COMMAND.win.cpu));
const { stdout: freeMem } = await (isSSH
? exec.execCommand(COMMAND.win.mem.free)
: exec(COMMAND.win.mem.free));
const { stdout: totalMem } = await (isSSH
? exec.execCommand(COMMAND.win.mem.total)
: exec(COMMAND.win.mem.total));
const { stdout: totalSwapMem } = await (isSSH
? exec.execCommand(COMMAND.win.mem.totalSwap)
: exec(COMMAND.win.mem.totalSwap));
const { stdout: freeSwapMem } = await (isSSH
? exec.execCommand(COMMAND.win.mem.freeSwap)
: exec(COMMAND.win.mem.freeSwap));
const { stdout: freeDisk } = await (isSSH
? exec.execCommand(COMMAND.win.disk.free)
: exec(COMMAND.win.disk.free));
const { stdout: totalDisk } = await (isSSH
? exec.execCommand(COMMAND.win.disk.total)
: exec(COMMAND.win.disk.total));
const { stdout: temp } = await (isSSH
? exec.execCommand(COMMAND.win.temp)
: exec(COMMAND.win.temp));
const loadLines: $TSFixMe = load.replace(/\s+/g, ' ').trim().split(' ');
const cpuLines: $TSFixMe = cpu.replace(/\s+/g, ' ').trim().split(' ');
const freeMemLines: $TSFixMe = freeMem
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const totalMemLines: $TSFixMe = totalMem
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const totalSwapMemLines: $TSFixMe = totalSwapMem
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const freeSwapMemLines: $TSFixMe = freeSwapMem
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const freeDiskLines: $TSFixMe = freeDisk
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const totalDiskLines: $TSFixMe = totalDisk
.replace(/\s+/g, ' ')
.trim()
.split(' ');
const tempLines: $TSFixMe = temp.replace(/\s+/g, ' ').trim().split(' ');
cpuLoad = loadLines[1];
avgCpuLoad = loadLines[1];
cpuCores = cpuLines[1];
memoryUsed =
parseFloat(totalMemLines[1]) - parseFloat(freeMemLines[1]) * 1024;
totalMemory = totalMemLines[1];
swapUsed =
parseFloat(totalSwapMemLines[1]) - parseFloat(freeSwapMemLines[1]);
storageUsed =
parseFloat(totalDiskLines[1]) - parseFloat(freeDiskLines[1]);
totalStorage = totalDiskLines[1];
storageUsage = (storageUsed / parseFloat(totalDiskLines[1])) * 100;
mainTemp = tempLines[1];
maxTemp = tempLines[1];
}
return {
cpuLoad,
avgCpuLoad,
cpuCores,
memoryUsed,
totalMemory,
swapUsed,
storageUsed,
totalStorage,
storageUsage,
mainTemp,
maxTemp,
};
};

View File

@@ -1,49 +0,0 @@
import ApiService from '../Utils/apiService';
import logger from 'CommonServer/Utils/Logger';
import pingfetch from '../Utils/pingFetch';
/*
 * It collects all monitors, then pings them one by one and stores their responses
* Checks if the website of the url in the monitors is up or down
* Creates incident if a website is down and resolves it when they come back up
*/
/**
 * Worker that probes a single URL monitor and reports the result to the
 * ingestor.
 *
 * For a monitor that has a URL, it fetches the endpoint with `pingfetch`,
 * then posts the raw and processed responses to the ingestor via
 * `ApiService.ping`. The ingestor's reply decides whether the probe should
 * retry; retries are capped so a persistently failing monitor cannot loop
 * forever.
 */
export default {
    ping: async ({ monitor }: $TSFixMe) => {
        // Guard against malformed payloads — `monitor.data` may be absent,
        // which would previously have thrown on `monitor.data.url`.
        if (monitor && monitor.type && monitor.data && monitor.data.url) {
            let retry: $TSFixMe = true;
            let retryCount: $TSFixMe = 0;

            /*
             * BUG FIX: the original condition `retry || retryCount > 2`
             * looped unboundedly while `retry` stayed true — the retry
             * cap was never applied. Retry while the ingestor requests
             * it, for at most three attempts (retryCount 0, 1, 2).
             */
            while (retry && retryCount <= 2) {
                const { res, resp, rawResp }: $TSFixMe = await pingfetch(
                    monitor.data.url
                );

                logger.info(
                    `Monitor ID ${monitor._id}: Start saving data to ingestor.`
                );

                // Forward the probe result; the ingestor replies with a
                // `retry` flag asking for another attempt when needed.
                const response: $TSFixMe = await ApiService.ping(
                    monitor._id,
                    {
                        monitor,
                        res,
                        resp,
                        rawResp,
                        type: monitor.type,
                        retryCount,
                    }
                );

                logger.info(
                    `Monitor ID ${monitor._id}: End saving data to ingestor.`
                );

                if (response && !response.retry) {
                    retry = false;
                } else {
                    retryCount++;
                }
            }
        }
    },
};

View File

@@ -11,22 +11,22 @@ describe('Ping', () => {
);
expect(result.responseTimeInMS?.toNumber()).toBeGreaterThan(0);
expect(result.responseTimeInMS?.toNumber()).toBeLessThanOrEqual(5000);
expect(result.isAlive).toBe(true);
expect(result.isOnline).toBe(true);
result = await Ping.fetch(new Hostname('www.google.com', 80), {
timeout: new PositiveNumber(5000),
});
expect(result.isAlive).toBe(true);
expect(result.isOnline).toBe(true);
expect(result.responseTimeInMS?.toNumber()).toBeGreaterThan(0);
expect(result.responseTimeInMS?.toNumber()).toBeLessThanOrEqual(5000);
result = await Ping.fetch(new Hostname('www.google.com', 65000), {
timeout: new PositiveNumber(5000),
});
expect(result.isAlive).toBe(false);
expect(result.isOnline).toBe(false);
expect(result.responseTimeInMS).toBeUndefined();
result = await Ping.fetch(new Hostname('www.a.com', 65000), {
timeout: new PositiveNumber(5000),
});
expect(result.isAlive).toBe(false);
expect(result.isOnline).toBe(false);
expect(result.responseTimeInMS).toBeUndefined();
});
test('Ping.fetch should return appropriate object if the valid IPV4 or IPV6 is given', async () => {
@@ -34,14 +34,14 @@ describe('Ping', () => {
result = await Ping.fetch(new IPv4('172.217.170.206'), {
timeout: new PositiveNumber(5000),
}); // One of the google ip
expect(result.isAlive).toBe(true);
expect(result.isOnline).toBe(true);
expect(result.responseTimeInMS?.toNumber()).toBeGreaterThan(0);
expect(result.responseTimeInMS?.toNumber()).toBeLessThanOrEqual(5000);
result = await Ping.fetch(new IPv4('192.0.2.200')); //
expect(result.isAlive).toBe(false);
expect(result.isOnline).toBe(false);
expect(result.responseTimeInMS).toBeUndefined();
result = await Ping.fetch(new IPv4('0.42.52.42')); // ip can't start 0
expect(result.responseTimeInMS).toBeUndefined();
expect(result.isAlive).toBe(false);
expect(result.isOnline).toBe(false);
});
});

View File

@@ -7,15 +7,17 @@ import net, { Socket } from 'net';
// TODO - make sure it works for IPv6
/**
 * Result of a TCP ping attempt against a host/port.
 *
 * NOTE(review): both `isAlive` and `isOnline` appear here — this looks like
 * diff residue from renaming `isAlive` to `isOnline`; confirm that only
 * `isOnline` should remain in the final interface.
 */
export interface PingResponse {
    // Legacy name for the connect-success flag (see note above).
    isAlive: boolean;
    // Whether the connection attempt succeeded.
    isOnline: boolean;
    // Time taken to establish the connection; unset when the host is unreachable.
    responseTimeInMS?: PositiveNumber;
    // Address of the remote peer the probe connected to.
    remoteAddressIP: IPv4 | IPv6;
    // Port of the remote peer the probe connected to.
    remoteAddressPort: Port;
}
/**
 * Optional settings for `Ping.fetch`.
 */
export interface PingOptions {
    // Port to connect to; presumably overrides any port carried by the
    // host argument — TODO confirm against Ping.fetch.
    port?: PositiveNumber;
    // Connection timeout in milliseconds (tests pass 5000 and expect
    // response times bounded by it).
    timeout?: PositiveNumber;
}
export default class Ping {
public static async fetch(
_host: Hostname | IPv4 | IPv6,
@@ -50,7 +52,7 @@ export default class Ping {
});
socket.on('timeout', () => {
resolve({
isAlive: false,
isOnline: false,
});
});
socket.on('connect', () => {
@@ -62,7 +64,7 @@ export default class Ping {
socket.end(() => {
resolve({
isAlive: true,
isOnline: true,
responseTimeInMS,
remoteAddressIP,
remoteAddressPort:
@@ -73,7 +75,7 @@ export default class Ping {
});
socket.on('error', () => {
resolve({
isAlive: false,
isOnline: false,
});
});
}