diff --git a/dist/index.js b/dist/index.js
index 30c9c056..7d943c45 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -8601,6 +8601,7 @@ class Comparator {
       }
     }
 
+    comp = comp.trim().split(/\s+/).join(' ')
     debug('comparator', comp, options)
     this.options = options
     this.loose = !!options.loose
@@ -8718,7 +8719,7 @@ class Comparator {
 module.exports = Comparator
 
 const parseOptions = __nccwpck_require__(785)
-const { re, t } = __nccwpck_require__(9523)
+const { safeRe: re, t } = __nccwpck_require__(9523)
 const cmp = __nccwpck_require__(5098)
 const debug = __nccwpck_require__(427)
 const SemVer = __nccwpck_require__(8088)
@@ -8758,19 +8759,26 @@ class Range {
     this.loose = !!options.loose
     this.includePrerelease = !!options.includePrerelease
 
-    // First, split based on boolean or ||
+    // First reduce all whitespace as much as possible so we do not have to rely
+    // on potentially slow regexes like \s*. This is then stored and used for
+    // future error messages as well.
     this.raw = range
-    this.set = range
+      .trim()
+      .split(/\s+/)
+      .join(' ')
+
+    // First, split on ||
+    this.set = this.raw
       .split('||')
       // map the range to a 2d array of comparators
-      .map(r => this.parseRange(r.trim()))
+      .map(r => this.parseRange(r))
       // throw out any comparator lists that are empty
       // this generally means that it was not a valid range, which is allowed
       // in loose mode, but will still throw if the WHOLE range is invalid.
       .filter(c => c.length)
 
     if (!this.set.length) {
-      throw new TypeError(`Invalid SemVer Range: ${range}`)
+      throw new TypeError(`Invalid SemVer Range: ${this.raw}`)
     }
 
     // if we have any that are not the null set, throw out null sets.
@@ -8796,9 +8804,7 @@ class Range {
 
   format () {
     this.range = this.set
-      .map((comps) => {
-        return comps.join(' ').trim()
-      })
+      .map((comps) => comps.join(' ').trim())
       .join('||')
       .trim()
     return this.range
@@ -8809,8 +8815,6 @@ class Range {
   }
 
   parseRange (range) {
-    range = range.trim()
-
     // memoize range parsing for performance.
     // this is a very hot path, and fully deterministic.
     const memoOpts =
@@ -8837,9 +8841,6 @@ class Range {
     // `^ 1.2.3` => `^1.2.3`
     range = range.replace(re[t.CARETTRIM], caretTrimReplace)
 
-    // normalize spaces
-    range = range.split(/\s+/).join(' ')
-
     // At this point, the range is completely trimmed and
     // ready to be split into comparators.
 
@@ -8935,7 +8936,7 @@ const Comparator = __nccwpck_require__(1532)
 const debug = __nccwpck_require__(427)
 const SemVer = __nccwpck_require__(8088)
 const {
-  re,
+  safeRe: re,
   t,
   comparatorTrimReplace,
   tildeTrimReplace,
@@ -8989,10 +8990,13 @@ const isX = id => !id || id.toLowerCase() === 'x' || id === '*'
 // ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0-0
 // ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0-0
 // ~0.0.1 --> >=0.0.1 <0.1.0-0
-const replaceTildes = (comp, options) =>
-  comp.trim().split(/\s+/).map((c) => {
-    return replaceTilde(c, options)
-  }).join(' ')
+const replaceTildes = (comp, options) => {
+  return comp
+    .trim()
+    .split(/\s+/)
+    .map((c) => replaceTilde(c, options))
+    .join(' ')
+}
 
 const replaceTilde = (comp, options) => {
   const r = options.loose ? re[t.TILDELOOSE] : re[t.TILDE]
@@ -9030,10 +9034,13 @@ const replaceTilde = (comp, options) => {
 // ^1.2.0 --> >=1.2.0 <2.0.0-0
 // ^0.0.1 --> >=0.0.1 <0.0.2-0
 // ^0.1.0 --> >=0.1.0 <0.2.0-0
-const replaceCarets = (comp, options) =>
-  comp.trim().split(/\s+/).map((c) => {
-    return replaceCaret(c, options)
-  }).join(' ')
+const replaceCarets = (comp, options) => {
+  return comp
+    .trim()
+    .split(/\s+/)
+    .map((c) => replaceCaret(c, options))
+    .join(' ')
+}
 
 const replaceCaret = (comp, options) => {
   debug('caret', comp, options)
@@ -9090,9 +9097,10 @@ const replaceCaret = (comp, options) => {
 
 const replaceXRanges = (comp, options) => {
   debug('replaceXRanges', comp, options)
-  return comp.split(/\s+/).map((c) => {
-    return replaceXRange(c, options)
-  }).join(' ')
+  return comp
+    .split(/\s+/)
+    .map((c) => replaceXRange(c, options))
+    .join(' ')
 }
 
 const replaceXRange = (comp, options) => {
@@ -9175,12 +9183,15 @@ const replaceXRange = (comp, options) => {
 const replaceStars = (comp, options) => {
   debug('replaceStars', comp, options)
   // Looseness is ignored here.  star is always as loose as it gets!
-  return comp.trim().replace(re[t.STAR], '')
+  return comp
+    .trim()
+    .replace(re[t.STAR], '')
 }
 
 const replaceGTE0 = (comp, options) => {
   debug('replaceGTE0', comp, options)
-  return comp.trim()
+  return comp
+    .trim()
     .replace(re[options.includePrerelease ? t.GTE0PRE : t.GTE0], '')
 }
 
@@ -9218,7 +9229,7 @@ const hyphenReplace = incPr => ($0,
     to = `<=${to}`
   }
 
-  return (`${from} ${to}`).trim()
+  return `${from} ${to}`.trim()
 }
 
 const testSet = (set, version, options) => {
@@ -9265,7 +9276,7 @@ const testSet = (set, version, options) => {
 
 const debug = __nccwpck_require__(427)
 const { MAX_LENGTH, MAX_SAFE_INTEGER } = __nccwpck_require__(2293)
-const { re, t } = __nccwpck_require__(9523)
+const { safeRe: re, t } = __nccwpck_require__(9523)
 
 const parseOptions = __nccwpck_require__(785)
 const { compareIdentifiers } = __nccwpck_require__(2463)
@@ -9556,8 +9567,10 @@ class SemVer {
       default:
         throw new Error(`invalid increment argument: ${release}`)
     }
-    this.format()
-    this.raw = this.version
+    this.raw = this.format()
+    if (this.build.length) {
+      this.raw += `+${this.build.join('.')}`
+    }
     return this
   }
 }
@@ -9644,7 +9657,7 @@ module.exports = cmp
 
 const SemVer = __nccwpck_require__(8088)
 const parse = __nccwpck_require__(5925)
-const { re, t } = __nccwpck_require__(9523)
+const { safeRe: re, t } = __nccwpck_require__(9523)
 
 const coerce = (version, options) => {
   if (version instanceof SemVer) {
@@ -9752,6 +9765,35 @@ const diff = (version1, version2) => {
   const highVersion = v1Higher ? v1 : v2
   const lowVersion = v1Higher ? v2 : v1
   const highHasPre = !!highVersion.prerelease.length
+  const lowHasPre = !!lowVersion.prerelease.length
+
+  if (lowHasPre && !highHasPre) {
+    // Going from prerelease -> no prerelease requires some special casing
+
+    // If the low version has only a major, then it will always be a major
+    // Some examples:
+    // 1.0.0-1 -> 1.0.0
+    // 1.0.0-1 -> 1.1.1
+    // 1.0.0-1 -> 2.0.0
+    if (!lowVersion.patch && !lowVersion.minor) {
+      return 'major'
+    }
+
+    // Otherwise it can be determined by checking the high version
+
+    if (highVersion.patch) {
+      // anything higher than a patch bump would result in the wrong version
+      return 'patch'
+    }
+
+    if (highVersion.minor) {
+      // anything higher than a minor bump would result in the wrong version
+      return 'minor'
+    }
+
+    // bumping major/minor/patch all have same result
+    return 'major'
+  }
 
   // add the `pre` prefix if we are going to a prerelease version
   const prefix = highHasPre ? 'pre' : ''
@@ -9768,26 +9810,8 @@ const diff = (version1, version2) => {
     return prefix + 'patch'
   }
 
-  // at this point we know stable versions match but overall versions are not equal,
-  // so either they are both prereleases, or the lower version is a prerelease
-
-  if (highHasPre) {
-    // high and low are preleases
-    return 'prerelease'
-  }
-
-  if (lowVersion.patch) {
-    // anything higher than a patch bump would result in the wrong version
-    return 'patch'
-  }
-
-  if (lowVersion.minor) {
-    // anything higher than a minor bump would result in the wrong version
-    return 'minor'
-  }
-
-  // bumping major/minor/patch all have same result
-  return 'major'
+  // high and low are preleases
+  return 'prerelease'
 }
 
 module.exports = diff
@@ -10217,16 +10241,27 @@ exports = module.exports = {}
 
 // The actual regexps go on exports.re
 const re = exports.re = []
+const safeRe = exports.safeRe = []
 const src = exports.src = []
 const t = exports.t = {}
 let R = 0
 
 const createToken = (name, value, isGlobal) => {
+  // Replace all greedy whitespace to prevent regex dos issues. These regex are
+  // used internally via the safeRe object since all inputs in this library get
+  // normalized first to trim and collapse all extra whitespace. The original
+  // regexes are exported for userland consumption and lower level usage. A
+  // future breaking change could export the safer regex only with a note that
+  // all input should have extra whitespace removed.
+  const safe = value
+    .split('\\s*').join('\\s{0,1}')
+    .split('\\s+').join('\\s')
   const index = R++
   debug(name, index, value)
   t[name] = index
  src[index] = value
  re[index] = new RegExp(value, isGlobal ? 'g' : undefined)
+  safeRe[index] = new RegExp(safe, isGlobal ? 'g' : undefined)
 }
 
 // The following Regular Expressions can be used for tokenizing,
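For context, here is a minimal standalone sketch (not part of the diff above) of the two-part ReDoS mitigation the vendored semver update applies: unbounded whitespace quantifiers in the generated patterns are rewritten into bounded ones for an internal `safeRe` set, and inputs are trimmed and whitespace-collapsed before matching. The helper names `makeSafe` and `normalizeRange` and the sample pattern are illustrative only, not semver exports.

```js
// Rewrite greedy whitespace quantifiers the same way createToken does:
// the literal pattern text '\s*' becomes '\s{0,1}' and '\s+' becomes '\s'.
const makeSafe = (source) =>
  source
    .split('\\s*').join('\\s{0,1}')
    .split('\\s+').join('\\s')

// Collapse runs of whitespace up front, mirroring the Range/Comparator
// constructors, so the bounded patterns still accept every valid input.
const normalizeRange = (range) => range.trim().split(/\s+/).join(' ')

// Example: a comparator-like pattern with greedy whitespace between the
// operator and the version becomes bounded after makeSafe.
const loose = '(<|>)?=?\\s*([0-9]+)\\.([0-9]+)\\.([0-9]+)'
const safe = makeSafe(loose)

console.log(safe) // (<|>)?=?\s{0,1}([0-9]+)\.([0-9]+)\.([0-9]+)
console.log(new RegExp(safe).test(normalizeRange('>=   1.2.3'))) // true
console.log(normalizeRange('  1.2.3   ||   2.x  ')) // '1.2.3 || 2.x'
```

Collapsing input whitespace first is what makes the bounded rewrite safe: after normalization no valid range contains more than one consecutive whitespace character, so the stricter patterns match the same inputs while removing the backtracking that made the original `\s*`/`\s+` patterns exploitable.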