import { assertNotNull, assertNotUndefined } from '../utils/assert';

import { AstMatcher, ParseError, TokenList } from './types';

type TokenName = string;
type Token = [TokenName, RegExp];

const tokenList: Token[] = [
  ['fuzz', /^~(?:\d+(\.\d+)?|\.\d+)/],
  ['boost', /^\^[-+]?\d+(\.\d+)?/],
  ['quoted_lit', /^\s*"(?:[^"]|\\")+"/],
  ['lparen', /^\s*\(\s*/],
  ['rparen', /^\s*\)\s*/],
  ['and_op', /^\s*(?:&&|AND)\s+/],
  ['and_op', /^\s*,\s*/],
  ['or_op', /^\s*(?:\|\||OR)\s+/],
  ['not_op', /^\s*NOT(?:\s+|(?=\())/],
  ['not_op', /^\s*[!-]\s*/],
  ['space', /^\s+/],
  ['word', /^(?:\\[\s,()^~]|[^\s,()^~])+/],
  ['word', /^(?:\\[\s,()]|[^\s,()])+/],
];
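
// Patterns are tried in order against the start of the remaining input, and
// the first match wins. For example, `fluffy, cute^2` lexes as word `fluffy`,
// and_op `,`, word `cute`, boost `^2`. The second, looser `word` pattern only
// fires when a leading `^` or `~` cannot be parsed as a boost or fuzz token.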

export type ParseTerm = (term: string, fuzz: number, boost: number) => AstMatcher;

export type Range = [number, number];
export type TermContext = [Range, string];

export interface LexResult {
  tokenList: TokenList;
  termContexts: TermContext[];
  error: ParseError | null;
}
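
// The token list comes out in postfix (RPN) order: each term becomes an
// AstMatcher and operators follow their operands. For example, `a, b OR c`
// yields [matcher(a), matcher(b), 'and_op', matcher(c), 'or_op'],
// i.e. (a AND b) OR c, since AND binds tighter than OR.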

export function generateLexResult(searchStr: string, parseTerm: ParseTerm): LexResult {
  const opQueue: string[] = [];
  const groupNegate: boolean[] = [];

  let searchTerm: string | null = null;
  let boostFuzzStr = '';
  let localSearchStr: string = searchStr;
  let negate = false;
  let boost = 1;
  let fuzz = 0;
  let lparenCtr = 0;

  let termIndex = 0;
  let index = 0;

  const ret: LexResult = {
    tokenList: [],
    termContexts: [],
    error: null,
  };

  const beginTerm = (token: string) => {
    searchTerm = token;
    termIndex = index;
  };

  const endTerm = () => {
    if (searchTerm !== null) {
      // Push the parsed term and its source range to the output.
      ret.tokenList.push(parseTerm(searchTerm, fuzz, boost));
      ret.termContexts.push([[termIndex, termIndex + searchTerm.length], searchTerm]);
      // Reset term and options data.
      boost = 1;
      fuzz = 0;
      searchTerm = null;
      boostFuzzStr = '';
      lparenCtr = 0;
    }

    if (negate) {
      ret.tokenList.push('not_op');
      negate = false;
    }
  };
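
  // Main scan: repeatedly match the start of the remaining input against the
  // token table above, running a small shunting-yard over `opQueue` so that
  // operators land in the output after their operands.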
  while (localSearchStr.length > 0) {
    for (const [tokenName, tokenRe] of tokenList) {
      const match = tokenRe.exec(localSearchStr);

      if (!match) {
        continue;
      }

      const token = match[0];
      const tokenIsBinaryOp = ['and_op', 'or_op'].indexOf(tokenName) !== -1;
      // An `rparen` at depth 0 closes the current group, so it ends any term
      // in progress, just like a binary operator does.
      const tokenIsGroupEnd = tokenName === 'rparen' && lparenCtr === 0;

      if (searchTerm !== null && (tokenIsBinaryOp || tokenIsGroupEnd)) {
        endTerm();
      }

      switch (tokenName) {
        case 'and_op':
          // Pop queued AND operators of equal precedence before queueing this one.
          while (opQueue[0] === 'and_op') {
            ret.tokenList.push(assertNotUndefined(opQueue.shift()));
          }
          opQueue.unshift('and_op');
          break;
        case 'or_op':
          // AND binds tighter than OR, so flush both before queueing the OR.
          while (opQueue[0] === 'and_op' || opQueue[0] === 'or_op') {
            ret.tokenList.push(assertNotUndefined(opQueue.shift()));
          }
          opQueue.unshift('or_op');
          break;
        case 'not_op':
          if (searchTerm) {
            // Already inside a search term, so the negation does not apply;
            // treat the token as part of the term instead.
            searchTerm += token;
          } else {
            negate = !negate;
          }
          break;
        case 'lparen':
          if (searchTerm) {
            // If we are inside the search term, do not error out just yet;
            // instead, consider it as part of the search term, as a user convenience.
            searchTerm += token;
            lparenCtr += 1;
          } else {
            opQueue.unshift('lparen');
            groupNegate.push(negate);
            negate = false;
          }
          break;
        case 'rparen':
          if (lparenCtr > 0) {
            // Close a parenthesis that was opened inside the current term.
            searchTerm = assertNotNull(searchTerm) + token;
            lparenCtr -= 1;
          } else {
            // Pop operators until the matching lparen, then apply any
            // negation recorded for this group.
            while (opQueue.length > 0) {
              const op = assertNotUndefined(opQueue.shift());
              if (op === 'lparen') {
                break;
              }
              ret.tokenList.push(op);
            }

            if (groupNegate.length > 0 && groupNegate.pop()) {
              ret.tokenList.push('not_op');
            }
          }
          break;
        case 'fuzz':
          if (searchTerm) {
            // For this and boost operations, we store the current match so far
            // in a temporary string in case this is actually inside the term.
            fuzz = parseFloat(token.substring(1));
            boostFuzzStr += token;
          } else {
            beginTerm(token);
          }
          break;
        case 'boost':
          if (searchTerm) {
            boost = parseFloat(token.substring(1));
            boostFuzzStr += token;
          } else {
            beginTerm(token);
          }
          break;
        case 'quoted_lit':
          if (searchTerm) {
            searchTerm += token;
          } else {
            beginTerm(token);
          }
          break;
        case 'word':
          if (searchTerm) {
            if (fuzz !== 0 || boost !== 1) {
              // The earlier fuzz/boost text belonged inside the term after all;
              // restore it and reset the parsed values.
              boost = 1;
              fuzz = 0;
              searchTerm += boostFuzzStr;
              boostFuzzStr = '';
            }
            searchTerm += token;
          } else {
            beginTerm(token);
          }
          break;
        default:
          // Append extra spaces within search terms.
          if (searchTerm) {
            searchTerm += token;
          }
      }

      // Truncate string and restart the token tests.
      localSearchStr = localSearchStr.substring(token.length);
      index += token.length;

      // Break since we have found a match.
      break;
    }
  }

  // Append final tokens to the stack.
  endTerm();

  if (opQueue.indexOf('rparen') !== -1 || opQueue.indexOf('lparen') !== -1) {
    ret.error = new ParseError('Mismatched parentheses.');
  }

  // Concatenate remaining operators to the token stack.
  ret.tokenList.push(...opQueue);

  return ret;
}
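
// A minimal usage sketch, assuming a caller-supplied `myParseTerm` (a
// hypothetical name here) that builds an AstMatcher for a single term,
// per the ParseTerm signature above:
//
//   const { tokenList, termContexts, error } =
//     generateLexResult('safe, -(solo OR duo)', myParseTerm);
//
// On success, `error` is null, `tokenList` holds matchers and operator
// names in postfix order, and `termContexts` maps each term back to its
// [start, end) range in the query string.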

export function generateLexArray(searchStr: string, parseTerm: ParseTerm): TokenList {
  const ret = generateLexResult(searchStr, parseTerm);

  if (ret.error) {
    throw ret.error;
  }

  return ret.tokenList;
}
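
// generateLexArray is the throwing variant of generateLexResult: for example,
// `generateLexArray('a, (b', myParseTerm)` leaves an unmatched 'lparen' on
// the operator queue and throws ParseError('Mismatched parentheses.'), while
// a well-formed query returns the postfix token list directly.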