// verdaccio/lib/storage.js
//
// Storage facade combining the local storage with all configured uplinks
// behind one common interface.

var async = require('async')
  , semver = require('semver')
  , UError = require('./error').UserError
  , Local = require('./local-storage')
  , Proxy = require('./up-storage')
  , mystreams = require('./streams')
  , utils = require('./utils')
  , transaction = require('./transaction')
//
// Implements Storage interface
// (same for storage.js, local-storage.js, up-storage.js)
//
2013-06-08 03:16:28 +02:00
function Storage(config) {
2013-10-26 14:18:36 +02:00
if (!(this instanceof Storage)) return new Storage(config)
2013-06-08 03:16:28 +02:00
2013-10-26 14:18:36 +02:00
this.config = config
2013-09-25 11:12:33 +02:00
// we support a number of uplinks, but only one local storage
// Proxy and Local classes should have similar API interfaces
2013-10-26 14:18:36 +02:00
this.uplinks = {}
2013-06-08 03:16:28 +02:00
for (var p in config.uplinks) {
2013-10-26 14:18:36 +02:00
this.uplinks[p] = new Proxy(config.uplinks[p], config)
this.uplinks[p].upname = p
2013-06-01 00:57:28 +02:00
}
2013-10-26 14:18:36 +02:00
this.local = new Local(config)
2013-06-08 03:16:28 +02:00
2013-10-26 14:18:36 +02:00
return this
2013-05-31 08:26:11 +02:00
}
2013-09-25 11:12:33 +02:00
//
// Add a {name} package to a system
//
// Function checks if package with the same name is available from uplinks.
// If it isn't, we create package metadata locally and send requests to do
// the same to all uplinks with write access. If all actions succeeded, we
// report success, if just one uplink fails, we abort.
//
// TODO: if a package is uploaded to uplink1, but upload to uplink2 fails,
// we report failure, but package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_access, r/o) &&
// uplinks (proxy_publish, write)
2013-09-25 11:12:33 +02:00
//
2013-06-08 03:16:28 +02:00
Storage.prototype.add_package = function(name, metadata, callback) {
2013-10-26 14:18:36 +02:00
var self = this
2013-11-24 18:07:18 +01:00
// NOTE:
// - when we checking package for existance, we ask ALL uplinks
// - when we publishing package, we only publish it to some of them
// so all requests are necessary
2013-06-14 10:34:29 +02:00
2013-11-24 18:07:18 +01:00
check_package(function(err) {
if (err) return callback(err)
publish_package(function(err) {
if (err) return callback(err)
callback()
2013-10-26 14:18:36 +02:00
})
2013-11-24 18:07:18 +01:00
})
function check_package(cb) {
self.get_package(name, function(err, results, err_results) {
// something weird
if (err && err.status !== 404) return cb(err)
for (var i=0; i<err_results.length; i++) {
// checking error
// if uplink fails with a status other than 404, we report failure
if (err_results[i][0] != null) {
if (err_results[i][0].status !== 404) {
return cb(new UError({
status: 503,
msg: 'one of the uplinks is down, refuse to publish'
}))
}
2013-06-14 10:34:29 +02:00
}
}
// checking package
2013-11-24 18:07:18 +01:00
if (results) {
return cb(new UError({
2013-06-14 10:34:29 +02:00
status: 409,
msg: 'this package is already present'
2013-10-26 14:18:36 +02:00
}))
2013-06-14 10:34:29 +02:00
}
2013-10-26 14:18:36 +02:00
2013-11-24 18:07:18 +01:00
return cb()
})
}
function publish_package(cb) {
var fw_uplinks = []
for (var i in self.uplinks) {
if (self.config.proxy_publish(name, i)) {
2013-11-24 18:07:18 +01:00
fw_uplinks.push(self.uplinks[i])
}
}
2013-11-24 18:07:18 +01:00
transaction(
fw_uplinks,
function localAction(cb) {
self.local.add_package(name, metadata, cb)
},
function localRollback(cb) {
self.local.remove_package(name, cb)
},
function remoteAction(remote, cb) {
remote.add_package(name, metadata, cb)
},
function remoteRollback(remote, cb) {
remote.remove_package(name, cb)
},
function(err) {
if (!err) {
callback()
} else if (err.uplink === 'local') {
return callback(err)
} else {
// hide uplink error with general message
return callback(new UError({
status: 503,
msg: 'can\'t upload to one of the uplinks, refuse to publish'
}))
}
}
2013-11-24 18:07:18 +01:00
)
}
2013-06-01 00:57:28 +02:00
}
2013-09-25 11:12:33 +02:00
//
// Add a new version of package {name} to a system
//
// Function uploads a new package version to all uplinks with write access
// and if everything succeeded it adds it locally.
//
// TODO: if a package is uploaded to uplink1, but upload to uplink2 fails,
// we report failure, but package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
2013-09-25 11:12:33 +02:00
//
2013-06-08 03:16:28 +02:00
Storage.prototype.add_version = function(name, version, metadata, tag, callback) {
2013-10-26 14:18:36 +02:00
var self = this
2013-10-26 14:18:36 +02:00
var uplinks = []
for (var i in self.uplinks) {
if (self.config.proxy_publish(name, i)) {
2013-10-26 14:18:36 +02:00
uplinks.push(self.uplinks[i])
}
}
async.map(uplinks, function(up, cb) {
2013-10-26 14:18:36 +02:00
up.add_version(name, version, metadata, tag, cb)
}, function(err, results) {
if (err) {
return callback(new UError({
status: 503,
msg: 'can\'t upload to one of the uplinks, refuse to publish'
2013-10-26 14:18:36 +02:00
}))
}
2013-10-26 14:18:36 +02:00
self.local.add_version(name, version, metadata, tag, callback)
})
2013-06-08 03:16:28 +02:00
}
2013-06-01 00:57:28 +02:00
//
// Change an existing package (i.e. unpublish one version)
//
// Function changes a package info from local storage and all uplinks with
// write access.
//
// TODO: currently it works only locally
//
// TODO: if a package is uploaded to uplink1, but upload to uplink2 fails,
// we report failure, but package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.change_package = function(name, metadata, revision, callback) {
return this.local.change_package(name, metadata, revision, callback)
}
//
// Remove a package from a system
//
// Function removes a package from local storage and all uplinks with
// write access.
//
// TODO: currently it works only locally
//
// TODO: if a package is uploaded to uplink1, but upload to uplink2 fails,
// we report failure, but package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.remove_package = function(name, callback) {
return this.local.remove_package(name, callback)
}
//
// Remove a tarball from a system
//
// Function removes a tarball from local storage and all uplinks with
// write access. Tarball in question should not be linked to in any existing
// versions, i.e. package version should be unpublished first.
//
// TODO: currently it works only locally
//
// TODO: if a package is uploaded to uplink1, but upload to uplink2 fails,
// we report failure, but package is not removed from uplink1. This might
// require manual intervention.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
//
Storage.prototype.remove_tarball = function(name, filename, revision, callback) {
return this.local.remove_tarball(name, filename, revision, callback)
}
2013-09-25 11:12:33 +02:00
//
2013-09-28 14:19:40 +02:00
// Upload a tarball for {name} package
2013-09-25 11:12:33 +02:00
//
// Function is syncronous and returns a WritableStream
//
2013-09-28 14:19:40 +02:00
// Function uploads a tarball to all uplinks with write access and to
// local storage in parallel with a speed of a slowest pipe. It reports
// success if all uploads succeed.
//
// Used storages: local (write) && uplinks (proxy_publish, write)
2013-09-25 11:12:33 +02:00
//
2013-06-20 15:07:34 +02:00
Storage.prototype.add_tarball = function(name, filename) {
2013-10-26 14:18:36 +02:00
var stream = new mystreams.UploadTarballStream()
var self = this
var upstreams = []
var localstream = self.local.add_tarball(name, filename)
2013-10-26 14:18:36 +02:00
upstreams.push(localstream)
2013-09-28 14:19:40 +02:00
for (var i in self.uplinks) {
if (self.config.proxy_publish(name, i)) {
2013-10-26 14:18:36 +02:00
upstreams.push(self.uplinks[i].add_tarball(name, filename))
2013-09-28 14:19:40 +02:00
}
}
function bail(err) {
upstreams.forEach(function(upstream) {
2013-10-26 14:18:36 +02:00
upstream.abort()
})
2013-09-28 14:19:40 +02:00
}
upstreams.forEach(function(upstream) {
2013-10-26 14:18:36 +02:00
stream.pipe(upstream)
2013-09-28 14:19:40 +02:00
upstream.on('error', function(err) {
if (err.code === 'EEXISTS') {
stream.emit('error', new UError({
status: 409,
msg: 'this tarball is already present'
2013-10-26 14:18:36 +02:00
}))
} else if (!stream.status && upstream !== localstream) {
2013-09-28 14:37:24 +02:00
stream.emit('error', new UError({
status: 503,
msg: 'one or more uplinks are unreachable'
2013-10-26 14:18:36 +02:00
}))
2013-09-28 14:19:40 +02:00
} else {
2013-10-26 14:18:36 +02:00
stream.emit('error', err)
2013-09-28 14:19:40 +02:00
}
2013-10-26 14:18:36 +02:00
bail(err)
})
2013-09-28 14:19:40 +02:00
upstream.on('success', function() {
2013-10-26 14:18:36 +02:00
upstream._sinopia_success = true
2013-09-28 14:19:40 +02:00
if (upstreams.filter(function(upstream) {
2013-10-26 14:18:36 +02:00
return !upstream._sinopia_success
2013-10-01 20:02:23 +02:00
}).length === 0) {
2013-10-26 14:18:36 +02:00
stream.emit('success')
2013-09-28 14:19:40 +02:00
}
2013-10-26 14:18:36 +02:00
})
})
2013-09-28 14:19:40 +02:00
stream.abort = function() {
2013-10-26 14:18:36 +02:00
bail()
}
2013-09-28 14:19:40 +02:00
stream.done = function() {
upstreams.forEach(function(upstream) {
2013-10-26 14:18:36 +02:00
upstream.done()
})
}
2013-09-28 14:19:40 +02:00
2013-10-26 14:18:36 +02:00
return stream
2013-06-01 00:57:28 +02:00
}
2013-09-25 11:12:33 +02:00
//
// Get a tarball from a storage for {name} package
//
// Function is syncronous and returns a ReadableStream
//
2013-09-27 02:26:15 +02:00
// Function tries to read tarball locally, if it fails then it reads package
// information in order to figure out where we can get this tarball from
//
// Used storages: local || uplink (just one)
2013-09-25 11:12:33 +02:00
//
Storage.prototype.get_tarball = function(name, filename) {
2013-10-26 14:18:36 +02:00
var stream = new mystreams.ReadTarballStream()
stream.abort = function() {}
var self = this
2013-06-19 18:58:16 +02:00
2013-06-18 20:14:55 +02:00
// if someone requesting tarball, it means that we should already have some
// information about it, so fetching package info is unnecessary
// trying local first
2013-10-26 14:18:36 +02:00
var rstream = self.local.get_tarball(name, filename)
var is_open = false
2013-06-20 15:07:34 +02:00
rstream.on('error', function(err) {
if (is_open || err.status !== 404) {
2013-10-26 14:18:36 +02:00
return stream.emit('error', err)
2013-06-20 15:07:34 +02:00
}
2013-06-20 15:07:34 +02:00
// local reported 404
2013-10-26 14:18:36 +02:00
var err404 = err
var uplink = null
rstream.abort()
rstream = null // gc
2013-06-18 20:14:55 +02:00
2013-06-19 18:58:16 +02:00
self.local.get_package(name, function(err, info) {
2013-10-26 14:18:36 +02:00
if (err) return stream.emit('error', err)
2013-06-18 20:14:55 +02:00
2013-06-19 18:58:16 +02:00
if (info._distfiles[filename] == null) {
2013-10-26 14:18:36 +02:00
return stream.emit('error', err404)
2013-06-19 18:58:16 +02:00
}
2013-10-26 14:18:36 +02:00
var file = info._distfiles[filename]
var uplink = null
2013-06-19 18:58:16 +02:00
for (var p in self.uplinks) {
if (self.uplinks[p].can_fetch_url(file.url)) {
2013-10-26 14:18:36 +02:00
uplink = self.uplinks[p]
2013-06-19 18:58:16 +02:00
}
}
if (uplink == null) {
uplink = new Proxy({
2013-06-20 15:41:07 +02:00
url: file.url,
2013-06-19 18:58:16 +02:00
_autogenerated: true,
2013-10-26 14:18:36 +02:00
}, self.config)
2013-06-19 18:58:16 +02:00
}
2013-10-26 14:18:36 +02:00
var savestream = self.local.add_tarball(name, filename)
2013-06-20 18:54:50 +02:00
savestream.on('error', function(err) {
2013-10-26 14:18:36 +02:00
savestream.abort()
stream.emit('error', err)
})
savestream.on('open', function() {
2013-10-26 14:18:36 +02:00
var rstream2 = uplink.get_url(file.url)
rstream2.on('error', function(err) {
2013-10-26 14:18:36 +02:00
savestream.abort()
stream.emit('error', err)
})
rstream2.on('end', function() {
2013-10-26 14:18:36 +02:00
savestream.done()
})
// XXX: check, what would happen if client disconnects?
2013-10-26 14:18:36 +02:00
rstream2.pipe(stream)
rstream2.pipe(savestream)
})
})
})
2013-06-20 15:07:34 +02:00
rstream.on('open', function() {
2013-10-26 14:18:36 +02:00
is_open = true
rstream.pipe(stream)
})
return stream
2013-06-01 00:57:28 +02:00
}
2013-09-25 11:12:33 +02:00
//
// Retrieve a package metadata for {name} package
//
// Function invokes local.get_package and uplink.get_package for every
// uplink with proxy_access rights against {name} and combines results
// into one json object
//
// Used storages: local && uplink (proxy_access)
//
2013-06-08 03:16:28 +02:00
Storage.prototype.get_package = function(name, callback) {
2013-11-24 18:07:18 +01:00
// NOTE: callback(err, result, _uplink_errors)
// _uplink_errors is an array of errors used internally
// XXX: move it to another function maybe?
2013-10-22 11:31:48 +02:00
var self = this
2013-06-08 03:16:28 +02:00
2013-10-22 11:31:48 +02:00
self.local.get_package(name, function(err, data) {
if (err && (!err.status || err.status >= 500)) {
// report internal errors right away
return cb(err)
}
2013-05-31 08:26:11 +02:00
2013-10-22 11:31:48 +02:00
var uplinks = []
for (var i in self.uplinks) {
if (self.config.proxy_access(name, i)) {
uplinks.push(self.uplinks[i])
}
2013-10-22 11:31:48 +02:00
}
2013-06-08 03:16:28 +02:00
2013-10-22 11:31:48 +02:00
var result = data || {
name: name,
versions: {},
'dist-tags': {},
_uplinks: {},
}
var exists = !err
var latest = result['dist-tags'].latest
2013-06-20 15:07:34 +02:00
2013-10-22 11:31:48 +02:00
async.map(uplinks, function(up, cb) {
var oldetag = null
if (utils.is_object(result._uplinks[up.upname]))
oldetag = result._uplinks[up.upname].etag
2013-06-08 03:16:28 +02:00
2013-10-22 11:31:48 +02:00
up.get_package(name, oldetag, function(err, up_res, etag) {
2013-11-24 18:07:18 +01:00
if (err || !up_res) return cb(null, [err || new Error('no data')])
2013-06-14 09:56:02 +02:00
2013-10-22 11:31:48 +02:00
try {
utils.validate_metadata(up_res, name)
} catch(err) {
2013-11-24 18:07:18 +01:00
return cb(null, [err])
2013-06-08 03:16:28 +02:00
}
2013-06-14 10:34:29 +02:00
2013-10-22 11:31:48 +02:00
result._uplinks[up.upname] = {
etag: etag
}
var this_version = up_res['dist-tags'].latest
if (latest == null
|| (!semver.gt(latest, this_version) && this_version)) {
latest = this_version
var is_latest = true
}
2013-10-26 14:18:36 +02:00
;['versions', 'dist-tags'].forEach(function(key) {
2013-10-22 11:31:48 +02:00
for (var i in up_res[key]) {
if (!result[key][i] || is_latest) {
result[key][i] = up_res[key][i]
}
}
})
2013-06-18 20:14:55 +02:00
2013-10-22 11:31:48 +02:00
// if we got to this point, assume that the correct package exists
// on the uplink
exists = true
cb()
})
2013-11-24 18:07:18 +01:00
}, function(err, uplink_errors) {
2013-10-26 14:18:36 +02:00
if (err) return callback(err)
2013-10-22 11:31:48 +02:00
if (!exists) {
return callback(new UError({
status: 404,
msg: 'no such package available'
2013-11-24 18:07:18 +01:00
}), null, uplink_errors)
2013-10-22 11:31:48 +02:00
}
self.local.update_versions(name, result, function(err) {
if (err) return callback(err)
var whitelist = ['_rev', 'name', 'versions', 'dist-tags']
for (var i in result) {
if (!~whitelist.indexOf(i)) delete result[i]
}
2013-11-24 18:07:18 +01:00
callback(null, result, uplink_errors)
2013-10-22 11:31:48 +02:00
})
})
})
2013-05-31 08:26:11 +02:00
}
2013-10-26 14:18:36 +02:00
module.exports = Storage
2013-06-08 03:16:28 +02:00