node-memcached | |
memcached | lib/memcached.js |
var EventEmitter = require('events').EventEmitter
  , Stream = require('net').Stream
  , Buffer = require('buffer').Buffer;

var HashRing = require('hashring')
  , Connection = require('./connection')
  , Utils = require('./utils')
  , Manager = Connection.Manager
  , IssueLog = Connection.IssueLog;

// The constructor
function Client(args, options){
  if (!(this && this.hasOwnProperty && (this instanceof Client))) return new Client(args, options);

  var servers = []
    , weights = {}
    , key;

  // Parse down the connection arguments

  if (!servers.length) throw new Error('No servers were supplied in the arguments');

  // merge with global and user config
  Utils.merge(this, Client.config);
  Utils.merge(this, options);
  EventEmitter.call(this);

  this.servers = servers;
  this.HashRing = new HashRing(args, this.algorithm);
  this.connections = {};
  this.issues = [];
};

// Allows users to configure the memcached globally or per memcached client
Client.config = {
  maxKeySize: 251          // max key size allowed by Memcached
, maxExpiration: 2592000   // max expiration duration allowed by Memcached
, maxValue: 1048576        // max length of value allowed by Memcached

, algorithm: 'crc32'       // hashing algorithm that is used for key mapping

, poolSize: 10             // maximal parallel connections
, reconnect: 18000000      // if dead, attempt reconnect each xx ms
, timeout: 5000            // after x ms the server should send a timeout if we can't connect
, retries: 5               // amount of retries before server is dead
, retry: 30000             // timeout between retries, all calls will be marked as cache miss
, remove: false            // remove server if dead; if false, we will attempt to reconnect
, redundancy: false        // allows you to re-distribute the keys over x amount of servers
, keyCompression: true     // compress keys if they are too large (md5)
, debug: false             // Output the commands and responses
};

// There are some functions we don't want users to touch so we scope them
(function(nMemcached){
  const LINEBREAK = '\r\n'
      , NOREPLY = ' noreply'
      , FLUSH = 1E3
      , BUFFER = 1E2
      , CONTINUE = 1E1
      , FLAGJSON = 1<<1
      , FLAGBINARY = 2<<1;

  var memcached = nMemcached.prototype = new EventEmitter
    , private = {}
    , undefined;

  // Creates or generates a new connection for the given server, the callback will receive the connection
  // if the operation was successful
  memcached.connect = function connect(server, callback){
    // server is dead, bail out
    if (server in this.issues && this.issues[server].failed) return callback(false, false);
  };

  // Creates a multi stream, so it's easier to query against
  // multiple memcached servers.
  memcached.multi = function memcachedMulti(keys, callback){
    var map = {}
      , memcached = this
      , servers
      , i;
  };

  // Executes the command on the net.Stream, if no server is supplied it will use the query.key to get
  // the server from the HashRing
  memcached.command = function memcachedCommand(queryCompiler, server){
  };

  // Logs all connection issues, and hands them off. Marking all requests as cache misses.
  memcached.connectionIssue = function connectionIssue(error, S, callback){
    // end connection and mark callback as cache miss
    if (S && S.end) S.end();
    if (callback) callback(false, false);
  };

  // Kills all active connections
  memcached.end = function endMemcached(){
    var memcached = this;

    Object.keys(this.connections).forEach(function closeConnection(key){
      memcached.connections[key].free(0);
    });
  };

  // These do not need to be publicly available as it's one of the most important
  // parts of the whole client, the parser commands:
  private.parsers = {
      // handle error responses
      'NOT_FOUND': function(tokens, dataSet, err){ return [CONTINUE, false] }
    , 'NOT_STORED': function(tokens, dataSet, err){ return [CONTINUE, false] }
    , 'ERROR': function(tokens, dataSet, err){ err.push('Received an ERROR response'); return [FLUSH, false] }
    , 'CLIENT_ERROR': function(tokens, dataSet, err){ err.push(tokens.splice(1).join(' ')); return [CONTINUE, false] }
    , 'SERVER_ERROR': function(tokens, dataSet, err, queue, S, memcached){ memcached.connectionIssue(tokens.splice(1).join(' '), S); return [CONTINUE, false] }
    , 'STORED': function(tokens, dataSet){ return [CONTINUE, true] }
    , 'DELETED': function(tokens, dataSet){ return [CONTINUE, true] }
    , 'OK': function(tokens, dataSet){ return [CONTINUE, true] }
    , 'EXISTS': function(tokens, dataSet){ return [CONTINUE, false] }
    , 'END': function(tokens, dataSet, err, queue){ if (!queue.length) queue.push(false); return [FLUSH, true] }
    , 'VALUE': function(tokens, dataSet, err, queue){
        var key = tokens[1]
          , flag = +tokens[2]
          , expire = tokens[3]
          , cas = tokens[4]
          , multi = this.metaData[0] && this.metaData[0].multi || cas ? {} : false
          , tmp;
    , 'INCRDECR': function(tokens){ return [CONTINUE, +tokens[1]] }
    , 'STAT': function(tokens, dataSet, err, queue){
        queue.push([tokens[1], /^\d+$/.test(tokens[2]) ? +tokens[2] : tokens[2]]);
        return [BUFFER, true]
      }
    , 'VERSION': function(tokens, dataSet){
        var versionTokens = /(\d+)(?:.)(\d+)(?:.)(\d+)$/.exec(tokens.pop());
    , 'ITEM': function(tokens, dataSet, err, queue){
        queue.push({
          key: tokens[1]
        , b: +tokens[2].substr(1)
        , s: +tokens[4]
        });
        return [BUFFER, false]
      }
  };

  // Parses down result sets
  private.resultParsers = {
      // combines the stats array into an object
      'stats': function(resultSet){
        var response = {};
    , 'stats settings': function(){ return private.resultParsers.stats.apply(this, arguments) }

      // Group slabs by slab id
    , 'stats slabs': function(resultSet){
        var response = {};
    , 'stats items': function(resultSet){
        var response = {};
  };

  // Generates a RegExp that can be used to check if a chunk is a memcached response identifier
  private.allCommands = new RegExp('^(?:' + Object.keys(private.parsers).join('|') + '|\\d' + ')');
  private.bufferedCommands = new RegExp('^(?:' + Object.keys(private.parsers).join('|') + ')');

  // When working with large chunks of responses, node splits them into pieces, so we might receive
  // half a response. We therefore buffer up the data and query against our buffered buffer.
  // Also, when you execute a lot of .writes to the same stream, node will combine the responses
  // into one response stream with no indication of where it cut the data. It can cut inside the value response,
  // or even right in the middle of a line-break, so we need to make sure the last piece in the buffer is a LINEBREAK
  // because that is the only certainty about the Memcached protocol: all responses end with one.
  private.buffer = function BufferBuffer(S, BufferStream){
    S.responseBuffer += BufferStream;
  };

  // The actual parser function that scans over the responseBuffer in search of Memcached response
  // identifiers. Once we have found one, we send it to the dedicated parsers that will transform
  // the data into a human readable format, deciding if we should queue it up or send it to a callback fn.
  memcached.rawDataReceived = function rawDataReceived(S){
    var queue = []
      , token
      , tokenSet
      , dataSet = ''
      , resultSet
      , metaData
      , err = []
      , tmp;
  };

  // Small wrapper function that only executes errors when we have a callback
  private.errorResponse = function errorResponse(error, callback){
    if (typeof callback == 'function') callback(error, false);
  };

  // This is where the actual Memcached API layer begins:
  memcached.get = function get(key, callback){
    if (Array.isArray(key)) return this.getMulti.apply(this, arguments);
  };

  // The difference between get and gets is that gets also returns a cas value,
  // and gets doesn't support multi-gets at this moment.
  memcached.gets = function get(key, callback){
    this.command(function getCommand(noreply){ return {
      key: key
    , callback: callback
    , validate: [['key', String], ['callback', Function]]
    , type: 'gets'
    , command: 'gets ' + key
    }});
  };

  // Handles get requests with multiple keys
  memcached.getMulti = function getMulti(keys, callback){
    var memcached = this
      , responses = {}
      , errors = []
      , calls
  };

  // As nearly all set commands use the same syntax we are going to proxy them all to this
  // function to ease maintenance. This is possible because most set commands will use the same
  // syntax for the Memcached server. Some commands do not require a lifetime and a flag, but the
  // memcached server is smart enough to ignore those.
  private.setters = function setters(type, validate, key, value, lifetime, callback, cas){
    var flag = 0
      , memcached = this
      , valuetype = typeof value
      , length;
  };

  // Curry the function so we can tell our private set function the type
  memcached.set = Utils.curry(false, private.setters, 'set', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]]);
  memcached.replace = Utils.curry(false, private.setters, 'replace', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]]);
  memcached.add = Utils.curry(false, private.setters, 'add', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]]);

  memcached.cas = function checkandset(key, value, cas, lifetime, callback){
    private.setters.call(this, 'cas', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]], key, value, lifetime, callback, cas);
  };

  memcached.append = function append(key, value, callback){
    private.setters.call(this, 'append', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]], key, value, 0, callback);
  };

  memcached.prepend = function prepend(key, value, callback){
    private.setters.call(this, 'prepend', [['key', String], ['lifetime', Number], ['value', String], ['callback', Function]], key, value, 0, callback);
  };

  // Small handler for incr and decr's
  private.incrdecr = function incrdecr(type, key, value, callback){
    this.command(function incredecrCommand(noreply){ return {
      key: key
    , callback: callback
    , value: value
    , validate: [['key', String], ['value', Number], ['callback', Function]]
    , type: type
    , redundancyEnabled: true
    , command: [type, key, value].join(' ') + (noreply ? NOREPLY : '')
    }});
  };

  // Curry the function so we can tell our private incrdecr function the type
  memcached.increment = memcached.incr = Utils.curry(false, private.incrdecr, 'incr');
  memcached.decrement = memcached.decr = Utils.curry(false, private.incrdecr, 'decr');

  // Deletes the keys from the servers
  memcached.del = function del(key, callback){
    this.command(function deleteCommand(noreply){ return {
      key: key
    , callback: callback
    , validate: [['key', String], ['callback', Function]]
    , type: 'delete'
    , redundancyEnabled: true
    , command: 'delete ' + key + (noreply ? NOREPLY : '')
    }});
  };
  memcached['delete'] = memcached.del;

  // Small wrapper that handles single keyword commands such as FLUSH ALL, VERSION and STAT
  private.singles = function singles(type, callback){
    var memcached = this
      , responses = []
      , errors = []
      , calls
  };

  // Curry the function so we can tell our private singles function the type
  memcached.version = Utils.curry(false, private.singles, 'version');
  memcached.flush = Utils.curry(false, private.singles, 'flush_all');
  memcached.stats = Utils.curry(false, private.singles, 'stats');
  memcached.settings = Utils.curry(false, private.singles, 'stats settings');
  memcached.slabs = Utils.curry(false, private.singles, 'stats slabs');
  memcached.items = Utils.curry(false, private.singles, 'stats items');

  // You need to use the items dump to get the correct server and slab settings,
  // see simple_cachedump.js for an example
  memcached.cachedump = function cachedump(server, slabid, number, callback){
    this.command(function cachedumpCommand(noreply){ return {
      callback: callback
    , number: number
    , slabid: slabid
    , validate: [['number', Number], ['slabid', Number], ['callback', Function]]
    , type: 'stats cachedump'
    , command: 'stats cachedump ' + slabid + ' ' + number
    }}, server);
  };

})(Client);

module.exports = Client;
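// Usage sketch (not part of lib/memcached.js): a minimal example of the public API defined above.
// It assumes the elided argument parsing accepts a 'host:port' string, as the published module does;
// the server address, key names and values below are illustrative only.
var Memcached = require('./lib/memcached');
var memcached = new Memcached('localhost:11211', { poolSize: 5, timeout: 2000 });

memcached.set('hello', 'world', 60, function(err, stored){
  // stored is true when the server answered STORED
  memcached.get('hello', function(err, value){
    console.log(value); // -> 'world'
    memcached.end();    // close all pooled connections
  });
});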
|
utils | lib/utils.js |
var CreateHash = require('crypto').createHash;

exports.validateArg = function validateArg(args, config){
  var toString = Object.prototype.toString
    , err
    , callback;

  args.validate.forEach(function(tokens){
    var key = tokens[0]
      , value = args[key];
  });

  if (err){
    if (callback) callback(err, false);
    return false;
  }

  return true;
};

// curries a function
exports.curry = function curry(context, func){
  var copy = Array.prototype.slice
    , args = copy.call(arguments, 2);

  return function(){
    return func.apply(context || this, args.concat(copy.call(arguments)));
  }
};

// a small util that attaches an object of handlers to an eventEmitter
exports.fuse = function fuse(target, handlers){
  for(var i in handlers)
    if (handlers.hasOwnProperty(i)){
      target.on(i, handlers[i]);
    }
};

// merges an object's properties / values with another object
exports.merge = function merge(target, obj){
  for(var i in obj){
    target[i] = obj[i];
  }

  return target;
};

// a small items iterator
exports.Iterator = function iterator(collection, callback){
  var arr = Array.isArray(collection)
    , keys = !arr ? Object.keys(collection) : false
    , index = 0
    , maximum = arr ? collection.length : keys.length
    , self = this;

  // returns the next item
  this.next = function(){
    var obj = arr ? collection[index] : { key: keys[index], value: collection[keys[index]] };
    callback(obj, index++, collection, self);
  };

  // check if we have more items
  this.hasNext = function(){
    return index < maximum;
  };
};
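// Usage sketch (not part of lib/utils.js): how curry and Iterator fit together. The greet function
// and the sample array are illustrative, not taken from the library.
var Utils = require('./lib/utils');

function greet(greeting, name){ return greeting + ', ' + name; }

var hello = Utils.curry(null, greet, 'Hello'); // pre-fills the first argument
console.log(hello('memcached'));               // -> 'Hello, memcached'

var it = new Utils.Iterator(['a', 'b', 'c'], function(item, index, collection, iterator){
  console.log(index, item);
  if (iterator.hasNext()) iterator.next(); // walk the whole collection
});
it.next(); // kicks off the iteration with the first item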
|
connection | lib/connection.js |
var EventEmitter = require('events').EventEmitter
  , Spawn = require('child_process').spawn
  , Utils = require('./utils');

exports.Manager = ConnectionManager; // connection pooling
exports.IssueLog = IssueLog;         // connection issue handling
exports.Available = ping;            // connection availability

function ping(host, callback){
  var pong = Spawn('ping', [host]);

  pong.stdout.on('data', function(data){
    callback(false, data.toString().split('\n')[0].substr(14));
    pong.kill();
  });

  pong.stderr.on('data', function(data){
    callback(data.toString().split('\n')[0].substr(14), false);
    pong.kill();
  });
};

function IssueLog(args){
  this.config = args;
  this.messages = [];
  this.failed = false;

  this.totalRetries = 0;
  this.totalReconnectsAttempted = 0;
  this.totalReconnectsSuccess = 0;

  Utils.merge(this, args);
  EventEmitter.call(this);
};

var issues = IssueLog.prototype = new EventEmitter;

issues.log = function(message){
  var issue = this;

  this.failed = true;
  this.messages.push(message || 'No message specified');

  if (this.retries){
    setTimeout(Utils.curry(issue, issue.attemptRetry), this.retry);
    return this.emit('issue', this.details);
  }

  if (this.remove) return this.emit('remove', this.details);

  setTimeout(Utils.curry(issue, issue.attemptReconnect), this.reconnect);
};

Object.defineProperty(issues, 'details', {
  get: function(){
    var res = {};
  }
});

issues.attemptRetry = function(){
  this.totalRetries++;
  this.retries--;
  this.failed = false;
};

issues.attemptReconnect = function(){
  var issue = this;
  this.totalReconnectsAttempted++;
  this.emit('reconnecting', this.details);

  // Ping the server
  ping(this.tokens[1], function(err){
    // still no access to the server
    if (err){
      issue.messages.push(err || 'No message specified');
      return setTimeout(Utils.curry(issue, issue.attemptReconnect), issue.reconnect);
    }
  });
};

function ConnectionManager(name, limit, constructor){
  this.name = name;
  this.total = limit;
  this.factory = constructor;
  this.connections = [];
};

var Manager = ConnectionManager.prototype;

Manager.allocate = function(callback){
  var total
    , i = total = this.connections.length
    , Manager = this;

  // check for an available connection
  while(i--){
    if (this.isAvailable(this.connections[i])){
      return callback(false, this.connections[i]);
    }
  }

  // create a new connection
  if (total < this.total){
    return this.connections.push(this.factory.apply(this, arguments));
  }

  // wait until the next event loop tick, to try again
  process.nextTick(function(){ Manager.allocate(callback) });
};

Manager.isAvailable = function(connection){
  var readyState = connection.readyState;
  return (readyState == 'open' || readyState == 'writeOnly')
    && !(connection.writeQueue && connection.writeQueue.length);
};

Manager.remove = function(connection){
  var position = this.connections.indexOf(connection);

  if (position !== -1) this.connections.splice(position, 1);
  if (connection.readyState && connection.readyState !== 'closed' && connection.end) connection.end();
};

Manager.free = function(keep){
  var save = 0
    , connection;

  while(this.connections.length){
    connection = this.connections.shift();

    if (save < keep && this.isAvailable(connection)){
      save++;
      continue;
    }
  }
};
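// Usage sketch (not part of lib/connection.js): exercising the ConnectionManager pool with a fake
// connection factory. The factory and its plain-object connection are illustrative; the memcached
// client passes a factory that builds net.Stream connections and hands them to the callback.
var Connection = require('./lib/connection');

function createFakeConnection(callback){
  var connection = { readyState: 'open', writeQueue: [], end: function(){ this.readyState = 'closed'; } };
  if (callback) callback(false, connection);
  return connection;
}

var pool = new Connection.Manager('localhost:11211', 2, createFakeConnection);

pool.allocate(function(err, connection){
  // receives an existing idle connection, or a freshly built one from the factory
  console.log('allocated a connection in state:', connection.readyState);
});

pool.free(0); // drop every pooled connection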