Parity pass on the other three language templates. Same guarantees as go/: survive server restart, client restart, half-open TCP, and long outages; rejoin and drain prime-side backlog on reconnect, without the user writing any of this in process.*. python/main.py: - grpc.keepalive_time_ms=10000, keepalive_timeout_ms=3000, keepalive_permit_without_calls=1 on the channel. Half-open TCP is detected within ~13s instead of the OS default ~2h. - Exponential backoff with jitter; max_backoff_seconds config ceiling (default 120). Attempts counter resets after a session runs healthy for 60s so transient restarts don't escalate the delay. - chain_id added as a required config field and sent as the x-chain-id gRPC metadata header (prime rejects streams without it). typescript/src/main.ts: - Same keepalive options on the @grpc/grpc-js client. - Same exponential backoff + jitter logic. - chain_id added to Config + metadata. bash/: - Config + README updated. The bash template uses Python's main.py as its runtime, so the behavioural changes above flow through without a separate main per language. Docs: each README gains a "Durability guarantees" section so contract authors see the invariants without reading the runtime code.
425 lines
12 KiB
TypeScript
Executable File
425 lines
12 KiB
TypeScript
Executable File
/**
 * Dragonchain Smart Contract Client
 *
 * A gRPC client that connects to the Dragonchain Prime server to process
 * smart contract transactions.
 *
 * Do not modify this file unless you need to customize the client behavior.
 * Implement your smart contract logic in process.ts instead.
 */
|
|
|
|
import * as grpc from "@grpc/grpc-js";
|
|
import * as protoLoader from "@grpc/proto-loader";
|
|
import * as fs from "fs";
|
|
import * as path from "path";
|
|
import * as yaml from "js-yaml";
|
|
|
|
import { ProcessResult, processTransaction } from "./process";
|
|
|
|
// Load proto definition
// __dirname is the compiled output directory; the proto ships one level up.
const PROTO_PATH = path.join(__dirname, "../proto/remote_sc.proto");
const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
  // keepCase: false converts snake_case proto fields to camelCase on the
  // decoded objects (transaction_id -> transactionId), matching the
  // SmartContractRequest/SmartContractResponse interfaces below.
  keepCase: false,
  // Represent 64-bit integers as strings to avoid JS precision loss.
  longs: String,
  enums: String,
  defaults: true,
  oneofs: true,
});

const protoDescriptor = grpc.loadPackageDefinition(packageDefinition) as any;
// Dynamically generated service constructor for remote_sc.SmartContractService;
// its Run method opens the bi-directional stream used by SmartContractClient.
const SmartContractService = protoDescriptor.remote_sc.SmartContractService;
|
|
|
|
// =============================================================================
// Configuration and Client Infrastructure
// Do not modify this file unless you need to customize the client behavior.
// Implement your smart contract logic in process.ts instead.
//
// Durability contract (provided by this file, no work for the user):
// - If the Dragonchain Prime server restarts, updates, or momentarily
//   drops the network, this client auto-reconnects. Transactions
//   observed during the outage are queued by prime and delivered once
//   the stream is re-established.
// - If this client restarts (crash, deploy, long sleep), it rejoins
//   the stream and prime re-delivers every still-pending transaction
//   that should have invoked it, oldest first.
// - Half-open TCP (a silent peer that never sent FIN) is detected
//   within ~13 s via gRPC keepalive pings. No dangling ghost streams.
// =============================================================================
|
|
|
|
// Runtime configuration, produced by loadConfig from the snake_case
// fields of config.yaml (see RawConfig for on-disk names and defaults).
interface Config {
  // host:port of the Dragonchain Prime gRPC server.
  serverAddress: string;
  // Sent as the x-chain-id metadata header; prime rejects streams without it.
  chainId: string;
  // Sent as the x-smart-contract-id metadata header.
  smartContractId: string;
  // Sent as the x-api-key metadata header for authentication.
  apiKey: string;
  // When true, tlsCertPath must point at the root certificate to trust.
  useTls: boolean;
  tlsCertPath?: string;
  // Maximum number of transactions processed concurrently.
  numWorkers: number;
  // Base delay (seconds) for exponential reconnect backoff.
  reconnectDelaySeconds: number;
  // Ceiling (seconds) on the reconnect backoff delay.
  maxBackoffSeconds: number;
  // 0 means retry forever; otherwise give up after this many attempts.
  maxReconnectAttempts: number;
}
|
|
|
|
// One transaction delivered by prime over the stream. Field names are
// camelCase because the proto is loaded with keepCase: false.
interface SmartContractRequest {
  transactionId: string;
  // Raw transaction payload; passed verbatim to processTransaction.
  transactionJson: string;
  // Passed through to processTransaction alongside the payload.
  envVars: Record<string, string>;
  secrets: Record<string, string>;
}
|
|
|
|
// Result written back to prime for a processed transaction.
interface SmartContractResponse {
  // Echo of the request's transactionId so prime can correlate responses.
  transactionId: string;
  // JSON-serialized result data ("{}" when the handler returned none).
  resultJson: string;
  // Captured log output (currently always empty — see processRequest).
  logs: string;
  // Forwarded from the handler's outputToChain flag.
  outputToChain: boolean;
  // Empty string on success; the error message otherwise.
  error: string;
}
|
|
|
|
class SmartContractClient {
|
|
private config: Config;
|
|
private client: any;
|
|
private running: boolean = false;
|
|
private workQueue: SmartContractRequest[] = [];
|
|
private processing: Set<string> = new Set();
|
|
private stream: any;
|
|
|
|
constructor(config: Config) {
|
|
this.config = config;
|
|
}
|
|
|
|
/**
|
|
* Connect to the gRPC server.
|
|
*/
|
|
connect(): boolean {
|
|
try {
|
|
let credentials: grpc.ChannelCredentials;
|
|
|
|
if (this.config.useTls) {
|
|
if (!this.config.tlsCertPath) {
|
|
console.error("[SC-Client] TLS enabled but no certificate path provided");
|
|
return false;
|
|
}
|
|
const rootCert = fs.readFileSync(this.config.tlsCertPath);
|
|
credentials = grpc.credentials.createSsl(rootCert);
|
|
} else {
|
|
credentials = grpc.credentials.createInsecure();
|
|
}
|
|
|
|
// Keepalive is the load-bearing piece for detecting a half-open
|
|
// connection. Without it, a silent peer (prime restarted without
|
|
// sending FIN; laptop resumed from sleep; corporate NAT dropped
|
|
// the flow) leaves us in a "connected" state until the OS-level
|
|
// TCP keepalive fires — on Linux ~2 hours by default. 10 s ping
|
|
// + 3 s timeout catches all of that within ~13 s.
|
|
const channelOptions = {
|
|
"grpc.keepalive_time_ms": 10000,
|
|
"grpc.keepalive_timeout_ms": 3000,
|
|
"grpc.keepalive_permit_without_calls": 1,
|
|
"grpc.http2.max_pings_without_data": 0,
|
|
};
|
|
|
|
this.client = new SmartContractService(
|
|
this.config.serverAddress,
|
|
credentials,
|
|
channelOptions
|
|
);
|
|
|
|
console.log(`[SC-Client] Connected to server at ${this.config.serverAddress}`);
|
|
return true;
|
|
} catch (e) {
|
|
console.error(`[SC-Client] Failed to connect: ${e}`);
|
|
return false;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Close the gRPC connection.
|
|
*/
|
|
close(): void {
|
|
if (this.stream) {
|
|
this.stream.end();
|
|
this.stream = null;
|
|
}
|
|
if (this.client) {
|
|
grpc.closeClient(this.client);
|
|
this.client = null;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Process a single request.
|
|
*/
|
|
private async processRequest(request: SmartContractRequest): Promise<SmartContractResponse> {
|
|
const logs = "";
|
|
|
|
try {
|
|
const result = await processTransaction(
|
|
request.transactionJson,
|
|
request.envVars,
|
|
request.secrets
|
|
);
|
|
|
|
const response: SmartContractResponse = {
|
|
transactionId: request.transactionId,
|
|
resultJson: result.data ? JSON.stringify(result.data) : "{}",
|
|
logs,
|
|
outputToChain: result.outputToChain,
|
|
error: result.error || "",
|
|
};
|
|
|
|
if (result.error) {
|
|
console.error(
|
|
`[SC-Client] Error processing transaction ${request.transactionId}: ${result.error}`
|
|
);
|
|
} else {
|
|
console.log(
|
|
`[SC-Client] Successfully processed transaction ${request.transactionId}`
|
|
);
|
|
}
|
|
|
|
return response;
|
|
} catch (e) {
|
|
console.error(
|
|
`[SC-Client] Exception processing transaction ${request.transactionId}: ${e}`
|
|
);
|
|
return {
|
|
transactionId: request.transactionId,
|
|
resultJson: "",
|
|
logs,
|
|
outputToChain: false,
|
|
error: String(e),
|
|
};
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Run the client and process incoming requests.
|
|
*/
|
|
async run(): Promise<boolean> {
|
|
if (!this.client) {
|
|
console.error("[SC-Client] Not connected to server");
|
|
return false;
|
|
}
|
|
|
|
this.running = true;
|
|
|
|
// Create metadata for authentication + routing. x-chain-id is
|
|
// required by prime; missing it yields "missing chain ID" and the
|
|
// stream never receives transactions.
|
|
const metadata = new grpc.Metadata();
|
|
metadata.add("x-api-key", this.config.apiKey);
|
|
metadata.add("x-smart-contract-id", this.config.smartContractId);
|
|
metadata.add("x-chain-id", this.config.chainId);
|
|
|
|
return new Promise((resolve) => {
|
|
// Establish bi-directional stream
|
|
this.stream = this.client.Run(metadata);
|
|
|
|
console.log(
|
|
`[SC-Client] Stream established, ready to process requests (workers: ${this.config.numWorkers})`
|
|
);
|
|
|
|
// Handle incoming requests
|
|
this.stream.on("data", async (request: SmartContractRequest) => {
|
|
if (!this.running) return;
|
|
|
|
console.log(
|
|
`[SC-Client] Received request: transaction_id=${request.transactionId}`
|
|
);
|
|
|
|
// Process with concurrency limit
|
|
if (this.processing.size >= this.config.numWorkers) {
|
|
this.workQueue.push(request);
|
|
} else {
|
|
this.startProcessing(request);
|
|
}
|
|
});
|
|
|
|
this.stream.on("end", () => {
|
|
console.log("[SC-Client] Server closed the stream");
|
|
this.running = false;
|
|
resolve(true);
|
|
});
|
|
|
|
this.stream.on("error", (err: grpc.ServiceError) => {
|
|
console.error(`[SC-Client] Stream error: ${err.code} - ${err.message}`);
|
|
this.running = false;
|
|
resolve(false);
|
|
});
|
|
});
|
|
}
|
|
|
|
/**
|
|
* Start processing a request with concurrency tracking.
|
|
*/
|
|
private async startProcessing(request: SmartContractRequest): Promise<void> {
|
|
this.processing.add(request.transactionId);
|
|
|
|
try {
|
|
const response = await this.processRequest(request);
|
|
if (this.stream && this.running) {
|
|
this.stream.write(response);
|
|
}
|
|
} finally {
|
|
this.processing.delete(request.transactionId);
|
|
|
|
// Process next queued request if any
|
|
if (this.workQueue.length > 0 && this.running) {
|
|
const next = this.workQueue.shift()!;
|
|
this.startProcessing(next);
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Stop the client gracefully.
|
|
*/
|
|
stop(): void {
|
|
console.log("[SC-Client] Stopping client...");
|
|
this.running = false;
|
|
}
|
|
}
|
|
|
|
// =============================================================================
|
|
// Configuration Loading
|
|
// =============================================================================
|
|
|
|
// On-disk (snake_case) shape of config.yaml, mapped to Config by
// loadConfig. Optional fields get the defaults noted below.
interface RawConfig {
  server_address: string;
  chain_id: string;
  smart_contract_id: string;
  api_key: string;
  use_tls?: boolean; // default false
  tls_cert_path?: string;
  num_workers?: number; // default 10
  reconnect_delay_seconds?: number; // default 3
  max_backoff_seconds?: number; // default 120
  max_reconnect_attempts?: number; // default 0 (retry forever)
}
|
|
|
|
function loadConfig(configPath: string): Config {
|
|
const content = fs.readFileSync(configPath, "utf8");
|
|
const raw = yaml.load(content) as RawConfig;
|
|
|
|
// Validate required fields
|
|
const required = ["server_address", "chain_id", "smart_contract_id", "api_key"];
|
|
for (const field of required) {
|
|
if (!(field in raw) || !raw[field as keyof RawConfig]) {
|
|
throw new Error(`Missing required config field: ${field}`);
|
|
}
|
|
}
|
|
|
|
return {
|
|
serverAddress: raw.server_address,
|
|
chainId: raw.chain_id,
|
|
smartContractId: raw.smart_contract_id,
|
|
apiKey: raw.api_key,
|
|
useTls: raw.use_tls ?? false,
|
|
tlsCertPath: raw.tls_cert_path,
|
|
numWorkers: raw.num_workers ?? 10,
|
|
reconnectDelaySeconds: raw.reconnect_delay_seconds ?? 3,
|
|
maxBackoffSeconds: raw.max_backoff_seconds ?? 120,
|
|
maxReconnectAttempts: raw.max_reconnect_attempts ?? 0,
|
|
};
|
|
}
|
|
|
|
/**
|
|
* Compute the next reconnect delay in milliseconds using exponential
|
|
* backoff with jitter. base * 2^attempts, capped at maxBackoffSeconds,
|
|
* plus random(0, base) jitter so many clients don't reconnect in
|
|
* lockstep after a server restart.
|
|
*/
|
|
function nextBackoffMs(config: Config, attempts: number): number {
|
|
const baseSec = Math.max(config.reconnectDelaySeconds, 1);
|
|
const capSec = Math.max(config.maxBackoffSeconds, baseSec);
|
|
const shift = Math.min(attempts, 10); // clamp exponent
|
|
const delaySec = Math.min(capSec, baseSec * 2 ** shift);
|
|
const jitterSec = Math.random() * baseSec;
|
|
return Math.round((delaySec + jitterSec) * 1000);
|
|
}
|
|
|
|
// =============================================================================
|
|
// Main Entry Point
|
|
// =============================================================================
|
|
|
|
async function main(): Promise<void> {
|
|
// Parse command line arguments
|
|
const args = process.argv.slice(2);
|
|
let configPath = "config.yaml";
|
|
|
|
for (let i = 0; i < args.length; i++) {
|
|
if (args[i] === "--config" || args[i] === "-c") {
|
|
configPath = args[i + 1];
|
|
i++;
|
|
}
|
|
}
|
|
|
|
// Load configuration
|
|
let config: Config;
|
|
try {
|
|
config = loadConfig(configPath);
|
|
} catch (e) {
|
|
console.error(`[SC-Client] Failed to load config: ${e}`);
|
|
process.exit(1);
|
|
}
|
|
|
|
// Create client
|
|
const client = new SmartContractClient(config);
|
|
|
|
// Setup signal handling for graceful shutdown
|
|
const shutdown = () => {
|
|
console.log("[SC-Client] Received shutdown signal...");
|
|
client.stop();
|
|
};
|
|
|
|
process.on("SIGINT", shutdown);
|
|
process.on("SIGTERM", shutdown);
|
|
|
|
// Connection loop with reconnection logic. A session that runs
|
|
// healthy for 60+ seconds resets the attempts counter so the next
|
|
// failure starts the exponential backoff schedule fresh.
|
|
let attempts = 0;
|
|
const HEALTHY_RUN_MS = 60 * 1000;
|
|
|
|
while (true) {
|
|
if (client.connect()) {
|
|
const start = Date.now();
|
|
const success = await client.run();
|
|
if (Date.now() - start > HEALTHY_RUN_MS) {
|
|
attempts = 0;
|
|
}
|
|
if (!success) {
|
|
// Check if it was a graceful shutdown
|
|
client.close();
|
|
break;
|
|
}
|
|
}
|
|
|
|
client.close();
|
|
|
|
attempts++;
|
|
if (
|
|
config.maxReconnectAttempts > 0 &&
|
|
attempts >= config.maxReconnectAttempts
|
|
) {
|
|
console.error(
|
|
`[SC-Client] Max reconnection attempts (${config.maxReconnectAttempts}) reached`
|
|
);
|
|
break;
|
|
}
|
|
|
|
const delayMs = nextBackoffMs(config, attempts - 1);
|
|
console.log(
|
|
`[SC-Client] Reconnecting in ${(delayMs / 1000).toFixed(1)} seconds (attempt ${attempts})...`
|
|
);
|
|
|
|
await new Promise((resolve) => setTimeout(resolve, delayMs));
|
|
}
|
|
|
|
console.log("[SC-Client] Client shut down");
|
|
}
|
|
|
|
main().catch((e) => {
|
|
console.error(`[SC-Client] Fatal error: ${e}`);
|
|
process.exit(1);
|
|
});
|