Mirror of https://github.com/Ionaru/easy-markdown-editor (synced 2025-07-02 15:44:28 -06:00)
Merge pull request #34 from NextStepWebs/development
Spell checking, Update dependencies, Fix bugs
Commit 0eb0a94ba0
@ -61,6 +61,7 @@ simplemde.value();
- **lineWrapping**: If set to `false`, disable line wrapping. Defaults to `true`.
- **indentWithTabs**: If set to `false`, indent using spaces instead of tabs. Defaults to `true`.
- **tabSize**: If set, customize the tab size. Defaults to `2`.
- **spellChecker**: If set to `false`, disable the spell checker. Defaults to `true`.
- **autosave**: *Saves the text that's being written. It will forget the text when the form is submitted.*
  - **enabled**: If set to `true`, autosave the text. Defaults to `false`.
  - **unique_id**: You must set a unique identifier so that SimpleMDE can autosave. Something that separates this from other textareas.
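For illustration, a minimal sketch of passing these options to the constructor. The element id and the specific values are examples only, and the `element` option is assumed from SimpleMDE's standard setup rather than from this hunk:

```js
var simplemde = new SimpleMDE({
	element: document.getElementById("my-textarea"), // example id, not a project default
	lineWrapping: true,
	indentWithTabs: false,
	tabSize: 4,
	spellChecker: false,
	autosave: {
		enabled: true,
		unique_id: "my-textarea-autosave" // must be unique per textarea
	}
});
```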
@ -159,6 +160,7 @@ As mentioned earlier, SimpleMDE is an improvement of [lepture's Editor project](
- Interface more closely resembles Bootstrap
- Now mobile friendly
- Option to autosave the text as you type
- Now spell checks what you write
- The text editor now automatically grows as you type more
- Fixed a large amount of bugs
- Switched to Font Awesome icons
4 simplemde.min.css (vendored)
File diff suppressed because one or more lines are too long
16 simplemde.min.js (vendored)
File diff suppressed because one or more lines are too long
@ -7,9 +7,12 @@ Minify the JS in this order:
1. `codemirror/overlay.js`
1. `codemirror/gfm.js`
1. `codemirror/xml.js`
1. `typo/typo.js`
1. `spell-checker/spell-checker.js`
1. `marked.js`
1. `simplemde.js`

Minify the CSS in this order:

1. `theme.css`
1. `theme.css`
1. `spell-checker/spell-checker.css`
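As an illustration only, a minimal Node.js sketch of concatenating and minifying in that order. It assumes `uglify-js` 2.x (`minify` with `fromString`) and lists only the files visible in this hunk, since the full list starts earlier in the README:

```js
var fs = require("fs");
var UglifyJS = require("uglify-js"); // assumed minifier; the project's actual build tooling is not shown here

// Only the files shown in the hunk above, in the documented order.
var jsFiles = [
	"codemirror/overlay.js",
	"codemirror/gfm.js",
	"codemirror/xml.js",
	"typo/typo.js",
	"spell-checker/spell-checker.js",
	"marked.js",
	"simplemde.js"
];

var source = jsFiles.map(function(f) { return fs.readFileSync(f, "utf8"); }).join("\n");
fs.writeFileSync("simplemde.min.js", UglifyJS.minify(source, { fromString: true }).code);
```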
@ -65,7 +65,7 @@
|
||||
setGuttersForLineNumbers(options);
|
||||
|
||||
var doc = options.value;
|
||||
if (typeof doc == "string") doc = new Doc(doc, options.mode);
|
||||
if (typeof doc == "string") doc = new Doc(doc, options.mode, null, options.lineSeparator);
|
||||
this.doc = doc;
|
||||
|
||||
var input = new CodeMirror.inputStyles[options.inputStyle](this);
|
||||
@ -714,7 +714,7 @@
|
||||
// width and height.
|
||||
removeChildren(display.cursorDiv);
|
||||
removeChildren(display.selectionDiv);
|
||||
display.gutters.style.height = 0;
|
||||
display.gutters.style.height = display.sizer.style.minHeight = 0;
|
||||
|
||||
if (different) {
|
||||
display.lastWrapHeight = update.wrapperHeight;
|
||||
@ -955,12 +955,22 @@
|
||||
lineView.node.removeChild(lineView.gutter);
|
||||
lineView.gutter = null;
|
||||
}
|
||||
if (lineView.gutterBackground) {
|
||||
lineView.node.removeChild(lineView.gutterBackground);
|
||||
lineView.gutterBackground = null;
|
||||
}
|
||||
if (lineView.line.gutterClass) {
|
||||
var wrap = ensureLineWrapped(lineView);
|
||||
lineView.gutterBackground = elt("div", null, "CodeMirror-gutter-background " + lineView.line.gutterClass,
|
||||
"left: " + (cm.options.fixedGutter ? dims.fixedPos : -dims.gutterTotalWidth) +
|
||||
"px; width: " + dims.gutterTotalWidth + "px");
|
||||
wrap.insertBefore(lineView.gutterBackground, lineView.text);
|
||||
}
|
||||
var markers = lineView.line.gutterMarkers;
|
||||
if (cm.options.lineNumbers || markers) {
|
||||
var wrap = ensureLineWrapped(lineView);
|
||||
var gutterWrap = lineView.gutter = elt("div", null, "CodeMirror-gutter-wrapper", "left: " +
|
||||
(cm.options.fixedGutter ? dims.fixedPos : -dims.gutterTotalWidth) +
|
||||
"px; width: " + dims.gutterTotalWidth + "px");
|
||||
(cm.options.fixedGutter ? dims.fixedPos : -dims.gutterTotalWidth) + "px");
|
||||
cm.display.input.setUneditable(gutterWrap);
|
||||
wrap.insertBefore(gutterWrap, lineView.text);
|
||||
if (lineView.line.gutterClass)
|
||||
@ -1082,13 +1092,18 @@
|
||||
if (!sel) sel = doc.sel;
|
||||
|
||||
var paste = cm.state.pasteIncoming || origin == "paste";
|
||||
var textLines = splitLines(inserted), multiPaste = null;
|
||||
var textLines = doc.splitLines(inserted), multiPaste = null;
|
||||
// When pasting N lines into N selections, insert one line per selection
|
||||
if (paste && sel.ranges.length > 1) {
|
||||
if (lastCopied && lastCopied.join("\n") == inserted)
|
||||
multiPaste = sel.ranges.length % lastCopied.length == 0 && map(lastCopied, splitLines);
|
||||
else if (textLines.length == sel.ranges.length)
|
||||
if (lastCopied && lastCopied.join("\n") == inserted) {
|
||||
if (sel.ranges.length % lastCopied.length == 0) {
|
||||
multiPaste = [];
|
||||
for (var i = 0; i < lastCopied.length; i++)
|
||||
multiPaste.push(doc.splitLines(lastCopied[i]));
|
||||
}
|
||||
} else if (textLines.length == sel.ranges.length) {
|
||||
multiPaste = map(textLines, function(l) { return [l]; });
|
||||
}
|
||||
}
|
||||
|
||||
// Normal behavior is to insert the new text into every selection
|
||||
@ -1388,7 +1403,7 @@
|
||||
// will be the case when there is a lot of text in the textarea,
|
||||
// in which case reading its value would be expensive.
|
||||
if (this.contextMenuPending || !cm.state.focused ||
|
||||
(hasSelection(input) && !prevInput) ||
|
||||
(hasSelection(input) && !prevInput && !this.composing) ||
|
||||
isReadOnly(cm) || cm.options.disableInput || cm.state.keySeq)
|
||||
return false;
|
||||
|
||||
@ -1756,7 +1771,7 @@
|
||||
var toNode = display.view[toIndex + 1].node.previousSibling;
|
||||
}
|
||||
|
||||
var newText = splitLines(domTextBetween(cm, fromNode, toNode, fromLine, toLine));
|
||||
var newText = cm.doc.splitLines(domTextBetween(cm, fromNode, toNode, fromLine, toLine));
|
||||
var oldText = getBetween(cm.doc, Pos(fromLine, 0), Pos(toLine, getLine(cm.doc, toLine).text.length));
|
||||
while (newText.length > 1 && oldText.length > 1) {
|
||||
if (lst(newText) == lst(oldText)) { newText.pop(); oldText.pop(); toLine--; }
|
||||
@ -1912,7 +1927,7 @@
|
||||
}
|
||||
|
||||
function domTextBetween(cm, from, to, fromLine, toLine) {
|
||||
var text = "", closing = false;
|
||||
var text = "", closing = false, lineSep = cm.doc.lineSeparator();
|
||||
function recognizeMarker(id) { return function(marker) { return marker.id == id; }; }
|
||||
function walk(node) {
|
||||
if (node.nodeType == 1) {
|
||||
@ -1926,7 +1941,7 @@
|
||||
if (markerID) {
|
||||
var found = cm.findMarks(Pos(fromLine, 0), Pos(toLine + 1, 0), recognizeMarker(+markerID));
|
||||
if (found.length && (range = found[0].find()))
|
||||
text += getBetween(cm.doc, range.from, range.to).join("\n");
|
||||
text += getBetween(cm.doc, range.from, range.to).join(lineSep);
|
||||
return;
|
||||
}
|
||||
if (node.getAttribute("contenteditable") == "false") return;
|
||||
@ -1938,7 +1953,7 @@
|
||||
var val = node.nodeValue;
|
||||
if (!val) return;
|
||||
if (closing) {
|
||||
text += "\n";
|
||||
text += lineSep;
|
||||
closing = false;
|
||||
}
|
||||
text += val;
|
||||
@ -2545,10 +2560,12 @@
|
||||
function prepareMeasureForLine(cm, line) {
|
||||
var lineN = lineNo(line);
|
||||
var view = findViewForLine(cm, lineN);
|
||||
if (view && !view.text)
|
||||
if (view && !view.text) {
|
||||
view = null;
|
||||
else if (view && view.changes)
|
||||
} else if (view && view.changes) {
|
||||
updateLineForChanges(cm, view, lineN, getDimensions(cm));
|
||||
cm.curOp.forceUpdate = true;
|
||||
}
|
||||
if (!view)
|
||||
view = updateExternalMeasurement(cm, line);
|
||||
|
||||
@ -3785,7 +3802,9 @@
|
||||
text[i] = reader.result;
|
||||
if (++read == n) {
|
||||
pos = clipPos(cm.doc, pos);
|
||||
var change = {from: pos, to: pos, text: splitLines(text.join("\n")), origin: "paste"};
|
||||
var change = {from: pos, to: pos,
|
||||
text: cm.doc.splitLines(text.join(cm.doc.lineSeparator())),
|
||||
origin: "paste"};
|
||||
makeChange(cm.doc, change);
|
||||
setSelectionReplaceHistory(cm.doc, simpleSelection(pos, changeEnd(change)));
|
||||
}
|
||||
@ -4468,7 +4487,7 @@
|
||||
function replaceRange(doc, code, from, to, origin) {
|
||||
if (!to) to = from;
|
||||
if (cmp(to, from) < 0) { var tmp = to; to = from; from = tmp; }
|
||||
if (typeof code == "string") code = splitLines(code);
|
||||
if (typeof code == "string") code = doc.splitLines(code);
|
||||
makeChange(doc, {from: from, to: to, text: code, origin: origin});
|
||||
}
|
||||
|
||||
@ -5263,6 +5282,22 @@
|
||||
clearCaches(cm);
|
||||
regChange(cm);
|
||||
}, true);
|
||||
option("lineSeparator", null, function(cm, val) {
|
||||
cm.doc.lineSep = val;
|
||||
if (!val) return;
|
||||
var newBreaks = [], lineNo = cm.doc.first;
|
||||
cm.doc.iter(function(line) {
|
||||
for (var pos = 0;;) {
|
||||
var found = line.text.indexOf(val, pos);
|
||||
if (found == -1) break;
|
||||
pos = found + val.length;
|
||||
newBreaks.push(Pos(lineNo, found));
|
||||
}
|
||||
lineNo++;
|
||||
});
|
||||
for (var i = newBreaks.length - 1; i >= 0; i--)
|
||||
replaceRange(cm.doc, val, newBreaks[i], Pos(newBreaks[i].line, newBreaks[i].ch + val.length))
|
||||
});
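// Usage sketch (not part of this diff): with the new "lineSeparator" option, document text
// is split and joined on the given separator instead of the auto-detected "\n".
//   var cm = CodeMirror(document.body, { value: "a\r\nb", lineSeparator: "\r\n" });
//   cm.lineCount();  // 2
//   cm.getValue();   // "a\r\nb"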
|
||||
option("specialChars", /[\t\u0000-\u0019\u00ad\u200b-\u200f\u2028\u2029\ufeff]/g, function(cm, val, old) {
|
||||
cm.state.specialChars = new RegExp(val.source + (val.test("\t") ? "" : "|\t"), "g");
|
||||
if (old != CodeMirror.Init) cm.refresh();
|
||||
@ -5613,7 +5648,8 @@
|
||||
} else if (cur.line > cm.doc.first) {
|
||||
var prev = getLine(cm.doc, cur.line - 1).text;
|
||||
if (prev)
|
||||
cm.replaceRange(line.charAt(0) + "\n" + prev.charAt(prev.length - 1),
|
||||
cm.replaceRange(line.charAt(0) + cm.doc.lineSeparator() +
|
||||
prev.charAt(prev.length - 1),
|
||||
Pos(cur.line - 1, prev.length - 1), Pos(cur.line, 1), "+transpose");
|
||||
}
|
||||
}
|
||||
@ -5627,7 +5663,7 @@
|
||||
var len = cm.listSelections().length;
|
||||
for (var i = 0; i < len; i++) {
|
||||
var range = cm.listSelections()[i];
|
||||
cm.replaceRange("\n", range.anchor, range.head, "+input");
|
||||
cm.replaceRange(cm.doc.lineSeparator(), range.anchor, range.head, "+input");
|
||||
cm.indentLine(range.from().line + 1, null, true);
|
||||
ensureCursorVisible(cm);
|
||||
}
|
||||
@ -6881,6 +6917,10 @@
|
||||
txt.setAttribute("role", "presentation");
|
||||
txt.setAttribute("cm-text", "\t");
|
||||
builder.col += tabWidth;
|
||||
} else if (m[0] == "\r" || m[0] == "\n") {
|
||||
var txt = content.appendChild(elt("span", m[0] == "\r" ? "␍" : "", "cm-invalidchar"));
|
||||
txt.setAttribute("cm-text", m[0]);
|
||||
builder.col += 1;
|
||||
} else {
|
||||
var txt = builder.cm.options.specialCharPlaceholder(m[0]);
|
||||
txt.setAttribute("cm-text", m[0]);
|
||||
@ -7226,8 +7266,8 @@
|
||||
};
|
||||
|
||||
var nextDocId = 0;
|
||||
var Doc = CodeMirror.Doc = function(text, mode, firstLine) {
|
||||
if (!(this instanceof Doc)) return new Doc(text, mode, firstLine);
|
||||
var Doc = CodeMirror.Doc = function(text, mode, firstLine, lineSep) {
|
||||
if (!(this instanceof Doc)) return new Doc(text, mode, firstLine, lineSep);
|
||||
if (firstLine == null) firstLine = 0;
|
||||
|
||||
BranchChunk.call(this, [new LeafChunk([new Line("", null)])]);
|
||||
@ -7241,8 +7281,9 @@
|
||||
this.history = new History(null);
|
||||
this.id = ++nextDocId;
|
||||
this.modeOption = mode;
|
||||
this.lineSep = lineSep;
|
||||
|
||||
if (typeof text == "string") text = splitLines(text);
|
||||
if (typeof text == "string") text = this.splitLines(text);
|
||||
updateDoc(this, {from: start, to: start, text: text});
|
||||
setSelection(this, simpleSelection(start), sel_dontScroll);
|
||||
};
|
||||
@ -7272,12 +7313,12 @@
|
||||
getValue: function(lineSep) {
|
||||
var lines = getLines(this, this.first, this.first + this.size);
|
||||
if (lineSep === false) return lines;
|
||||
return lines.join(lineSep || "\n");
|
||||
return lines.join(lineSep || this.lineSeparator());
|
||||
},
|
||||
setValue: docMethodOp(function(code) {
|
||||
var top = Pos(this.first, 0), last = this.first + this.size - 1;
|
||||
makeChange(this, {from: top, to: Pos(last, getLine(this, last).text.length),
|
||||
text: splitLines(code), origin: "setValue", full: true}, true);
|
||||
text: this.splitLines(code), origin: "setValue", full: true}, true);
|
||||
setSelection(this, simpleSelection(top));
|
||||
}),
|
||||
replaceRange: function(code, from, to, origin) {
|
||||
@ -7288,7 +7329,7 @@
|
||||
getRange: function(from, to, lineSep) {
|
||||
var lines = getBetween(this, clipPos(this, from), clipPos(this, to));
|
||||
if (lineSep === false) return lines;
|
||||
return lines.join(lineSep || "\n");
|
||||
return lines.join(lineSep || this.lineSeparator());
|
||||
},
|
||||
|
||||
getLine: function(line) {var l = this.getLineHandle(line); return l && l.text;},
|
||||
@ -7354,13 +7395,13 @@
|
||||
lines = lines ? lines.concat(sel) : sel;
|
||||
}
|
||||
if (lineSep === false) return lines;
|
||||
else return lines.join(lineSep || "\n");
|
||||
else return lines.join(lineSep || this.lineSeparator());
|
||||
},
|
||||
getSelections: function(lineSep) {
|
||||
var parts = [], ranges = this.sel.ranges;
|
||||
for (var i = 0; i < ranges.length; i++) {
|
||||
var sel = getBetween(this, ranges[i].from(), ranges[i].to());
|
||||
if (lineSep !== false) sel = sel.join(lineSep || "\n");
|
||||
if (lineSep !== false) sel = sel.join(lineSep || this.lineSeparator());
|
||||
parts[i] = sel;
|
||||
}
|
||||
return parts;
|
||||
@ -7375,7 +7416,7 @@
|
||||
var changes = [], sel = this.sel;
|
||||
for (var i = 0; i < sel.ranges.length; i++) {
|
||||
var range = sel.ranges[i];
|
||||
changes[i] = {from: range.from(), to: range.to(), text: splitLines(code[i]), origin: origin};
|
||||
changes[i] = {from: range.from(), to: range.to(), text: this.splitLines(code[i]), origin: origin};
|
||||
}
|
||||
var newSel = collapse && collapse != "end" && computeReplacedSel(this, changes, collapse);
|
||||
for (var i = changes.length - 1; i >= 0; i--)
|
||||
@ -7525,7 +7566,8 @@
|
||||
},
|
||||
|
||||
copy: function(copyHistory) {
|
||||
var doc = new Doc(getLines(this, this.first, this.first + this.size), this.modeOption, this.first);
|
||||
var doc = new Doc(getLines(this, this.first, this.first + this.size),
|
||||
this.modeOption, this.first, this.lineSep);
|
||||
doc.scrollTop = this.scrollTop; doc.scrollLeft = this.scrollLeft;
|
||||
doc.sel = this.sel;
|
||||
doc.extend = false;
|
||||
@ -7541,7 +7583,7 @@
|
||||
var from = this.first, to = this.first + this.size;
|
||||
if (options.from != null && options.from > from) from = options.from;
|
||||
if (options.to != null && options.to < to) to = options.to;
|
||||
var copy = new Doc(getLines(this, from, to), options.mode || this.modeOption, from);
|
||||
var copy = new Doc(getLines(this, from, to), options.mode || this.modeOption, from, this.lineSep);
|
||||
if (options.sharedHist) copy.history = this.history;
|
||||
(this.linked || (this.linked = [])).push({doc: copy, sharedHist: options.sharedHist});
|
||||
copy.linked = [{doc: this, isParent: true, sharedHist: options.sharedHist}];
|
||||
@ -7570,7 +7612,13 @@
|
||||
iterLinkedDocs: function(f) {linkedDocs(this, f);},
|
||||
|
||||
getMode: function() {return this.mode;},
|
||||
getEditor: function() {return this.cm;}
|
||||
getEditor: function() {return this.cm;},
|
||||
|
||||
splitLines: function(str) {
|
||||
if (this.lineSep) return str.split(this.lineSep);
|
||||
return splitLinesAuto(str);
|
||||
},
|
||||
lineSeparator: function() { return this.lineSep || "\n"; }
|
||||
});
|
||||
|
||||
// Public alias.
|
||||
@ -8269,7 +8317,12 @@
|
||||
} while (child = child.parentNode);
|
||||
};
|
||||
|
||||
function activeElt() { return document.activeElement; }
|
||||
function activeElt() {
|
||||
var activeElement = document.activeElement;
|
||||
while (activeElement && activeElement.root && activeElement.root.activeElement)
|
||||
activeElement = activeElement.root.activeElement;
|
||||
return activeElement;
|
||||
}
|
||||
// Older versions of IE throw an unspecified error when touching
|
||||
// document.activeElement in some cases (during loading, in iframe)
|
||||
if (ie && ie_version < 11) activeElt = function() {
|
||||
@ -8371,7 +8424,7 @@
|
||||
|
||||
// See if "".split is the broken IE version, if so, provide an
|
||||
// alternative way to split lines.
|
||||
var splitLines = CodeMirror.splitLines = "\n\nb".split(/\n/).length != 3 ? function(string) {
|
||||
var splitLinesAuto = CodeMirror.splitLines = "\n\nb".split(/\n/).length != 3 ? function(string) {
|
||||
var pos = 0, result = [], l = string.length;
|
||||
while (pos <= l) {
|
||||
var nl = string.indexOf("\n", pos);
|
||||
@ -8729,7 +8782,7 @@
|
||||
|
||||
// THE END
|
||||
|
||||
CodeMirror.version = "5.4.1";
|
||||
CodeMirror.version = "5.5.1";
|
||||
|
||||
return CodeMirror;
|
||||
});
|
@ -1,5 +1,6 @@
|
||||
// CodeMirror, copyright (c) by Marijn Haverbeke and others
|
||||
// Distributed under an MIT license: http://codemirror.net/LICENSE
|
||||
// NOTE: This has been modified from the original version to add additional commands
|
||||
|
||||
|
||||
(function(mod) {
|
||||
if (typeof exports == "object" && typeof module == "object") // CommonJS
|
||||
mod(require("../../lib/codemirror"));
|
||||
|
@ -202,7 +202,8 @@ CodeMirror.defineMode("markdown", function(cmCfg, modeCfg) {
|
||||
|
||||
function htmlBlock(stream, state) {
|
||||
var style = htmlMode.token(stream, state.htmlState);
|
||||
if ((htmlFound && state.htmlState.tagStart === null && !state.htmlState.context) ||
|
||||
if ((htmlFound && state.htmlState.tagStart === null &&
|
||||
(!state.htmlState.context && state.htmlState.tokenize.isInText)) ||
|
||||
(state.md_inside && stream.current().indexOf(">") > -1)) {
|
||||
state.f = inlineNormal;
|
||||
state.block = blockNormal;
|
||||
@ -446,12 +447,11 @@ CodeMirror.defineMode("markdown", function(cmCfg, modeCfg) {
|
||||
return type + linkemail;
|
||||
}
|
||||
|
||||
if (ch === '<' && stream.match(/^\w/, false)) {
|
||||
if (stream.string.indexOf(">") != -1) {
|
||||
var atts = stream.string.substring(1,stream.string.indexOf(">"));
|
||||
if (/markdown\s*=\s*('|"){0,1}1('|"){0,1}/.test(atts)) {
|
||||
state.md_inside = true;
|
||||
}
|
||||
if (ch === '<' && stream.match(/^(!--|\w)/, false)) {
|
||||
var end = stream.string.indexOf(">", stream.pos);
|
||||
if (end != -1) {
|
||||
var atts = stream.string.substring(stream.start, end);
|
||||
if (/markdown\s*=\s*('|"){0,1}1('|"){0,1}/.test(atts)) state.md_inside = true;
|
||||
}
|
||||
stream.backUp(1);
|
||||
state.htmlState = CodeMirror.startState(htmlMode);
|
||||
|
@ -109,6 +109,7 @@ CodeMirror.defineMode("xml", function(config, parserConfig) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
inText.isInText = true;
|
||||
|
||||
function inTag(stream, state) {
|
||||
var ch = stream.next();
|
||||
|
@ -529,11 +529,21 @@ SimpleMDE.prototype.render = function(el) {
|
||||
keyMaps["Enter"] = "newlineAndIndentContinueMarkdownList";
|
||||
keyMaps['Tab'] = 'tabAndIndentContinueMarkdownList';
|
||||
keyMaps['Shift-Tab'] = 'shiftTabAndIndentContinueMarkdownList';
|
||||
|
||||
|
||||
var mode = "spell-checker";
|
||||
var backdrop = "gfm";
|
||||
|
||||
if (options.spellChecker === false) {
|
||||
mode = "gfm";
|
||||
backdrop = undefined;
|
||||
}
|
||||
|
||||
this.codemirror = CodeMirror.fromTextArea(el, {
|
||||
mode: 'gfm',
|
||||
mode: mode,
|
||||
backdrop: backdrop,
|
||||
theme: 'paper',
|
||||
tabSize: (options.tabSize != undefined) ? options.tabSize : 2,
|
||||
indentUnit: (options.tabSize != undefined) ? options.tabSize : 2,
|
||||
indentWithTabs: (options.indentWithTabs === false) ? false : true,
|
||||
lineNumbers: false,
|
||||
autofocus: (options.autofocus === true) ? true : false,
|
||||
|
3 source files/spell-checker/spell-checker.css (new file)
@ -0,0 +1,3 @@
.CodeMirror .cm-spell-error:not(.cm-url) {
	background: rgba(255, 0, 0, .15);
}
83 source files/spell-checker/spell-checker.js (new file)
@ -0,0 +1,83 @@
CodeMirror.defineMode("spell-checker", function(config, parserConfig) {
	// Initialize data
	var num_loaded = 0;
	var aff_data = "";
	var dic_data = "";
	var typo;

	// Load AFF/DIC data
	var xhr_aff = new XMLHttpRequest();
	xhr_aff.open("GET", "https://cdn.jsdelivr.net/codemirror.spell-checker/latest/en_US.aff", true);
	xhr_aff.onload = function (e) {
		if (xhr_aff.readyState === 4 && xhr_aff.status === 200) {
			aff_data = xhr_aff.responseText;
			num_loaded++;

			if(num_loaded == 2){
				typo = new Typo("en_US", aff_data, dic_data, {
					platform: 'any'
				});
			}
		}
	};
	xhr_aff.send(null);

	var xhr_dic = new XMLHttpRequest();
	xhr_dic.open("GET", "https://cdn.jsdelivr.net/codemirror.spell-checker/latest/en_US.dic", true);
	xhr_dic.onload = function (e) {
		if (xhr_dic.readyState === 4 && xhr_dic.status === 200) {
			dic_data = xhr_dic.responseText;
			num_loaded++;

			if(num_loaded == 2){
				typo = new Typo("en_US", aff_data, dic_data, {
					platform: 'any'
				});
			}
		}
	};
	xhr_dic.send(null);

	// Define what separates a word
	var rx_word = "!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~ ";

	// Create the overlay and such
	var overlay = {
		token: function(stream, state) {
			var ch = stream.peek();
			var word = "";

			if(rx_word.includes(ch)) {
				stream.next();
				return null;
			}

			while((ch = stream.peek()) != null && !rx_word.includes(ch)) {
				word += ch;
				stream.next();
			}

			if(typo && !typo.check(word))
				return "spell-error"; // CSS class: cm-spell-error

			return null;
		}
	};

	var mode = CodeMirror.getMode(
		config, config.backdrop || "text/plain"
	);

	return CodeMirror.overlayMode(mode, overlay, true);
});

// Because some browsers don't support this functionality yet
if(!String.prototype.includes) {
	String.prototype.includes = function() {'use strict';
		return String.prototype.indexOf.apply(this, arguments) !== -1;
	};
}
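// Usage sketch (not part of spell-checker.js): SimpleMDE enables this mode as shown in the
// simplemde.js hunk above; a standalone CodeMirror setup would look roughly like:
//   var cm = CodeMirror.fromTextArea(document.getElementById("text"), {
//     mode: "spell-checker",
//     backdrop: "gfm"
//   });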
766 source files/typo/typo.js (new file)
@ -0,0 +1,766 @@
|
||||
'use strict';
|
||||
|
||||
/**
|
||||
* Typo is a JavaScript implementation of a spellchecker using hunspell-style
|
||||
* dictionaries.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Typo constructor.
|
||||
*
|
||||
* @param {String} [dictionary] The locale code of the dictionary being used. e.g.,
|
||||
* "en_US". This is only used to auto-load dictionaries.
|
||||
* @param {String} [affData] The data from the dictionary's .aff file. If omitted
|
||||
* and the first argument is supplied, on the "chrome" platform,
|
||||
* the .aff file will be loaded automatically from
|
||||
* lib/typo/dictionaries/[dictionary]/[dictionary].aff
|
||||
* On other platforms, it will be loaded from
|
||||
* [setting.path]/dictionaries/[dictionary]/[dictionary].aff
|
||||
* @param {String} [wordsData] The data from the dictionary's .dic file. If omitted,
|
||||
* and the first argument is supplied, on the "chrome" platform,
|
||||
* the .dic file will be loaded automatically from
|
||||
* lib/typo/dictionaries/[dictionary]/[dictionary].dic
|
||||
* On other platforms, it will be loaded from
|
||||
* [setting.path]/dictionaries/[dictionary]/[dictionary].dic
|
||||
* @param {Object} [settings] Constructor settings. Available properties are:
|
||||
* {String} [platform]: "chrome" for Chrome Extension or other
|
||||
* value for the usual web.
|
||||
* {String} [dictionaryPath]: path to load dictionary from in non-chrome
|
||||
* environment.
|
||||
* {Object} [flags]: flag information.
|
||||
*
|
||||
*
|
||||
* @returns {Typo} A Typo object.
|
||||
*/
|
||||
|
||||
var Typo = function (dictionary, affData, wordsData, settings) {
|
||||
settings = settings || {};
|
||||
|
||||
/** Determines the method used for auto-loading .aff and .dic files. **/
|
||||
this.platform = settings.platform || "chrome";
|
||||
|
||||
this.dictionary = null;
|
||||
|
||||
this.rules = {};
|
||||
this.dictionaryTable = {};
|
||||
|
||||
this.compoundRules = [];
|
||||
this.compoundRuleCodes = {};
|
||||
|
||||
this.replacementTable = [];
|
||||
|
||||
this.flags = settings.flags || {};
|
||||
|
||||
if (dictionary) {
|
||||
this.dictionary = dictionary;
|
||||
|
||||
if (this.platform == "chrome") {
|
||||
if (!affData) affData = this._readFile(chrome.extension.getURL("lib/typo/dictionaries/" + dictionary + "/" + dictionary + ".aff"));
|
||||
if (!wordsData) wordsData = this._readFile(chrome.extension.getURL("lib/typo/dictionaries/" + dictionary + "/" + dictionary + ".dic"));
|
||||
} else {
|
||||
var path = settings.dictionaryPath || '';
|
||||
|
||||
if (!affData) affData = this._readFile(path + "/" + dictionary + "/" + dictionary + ".aff");
|
||||
if (!wordsData) wordsData = this._readFile(path + "/" + dictionary + "/" + dictionary + ".dic");
|
||||
}
|
||||
|
||||
this.rules = this._parseAFF(affData);
|
||||
|
||||
// Save the rule codes that are used in compound rules.
|
||||
this.compoundRuleCodes = {};
|
||||
|
||||
for (var i = 0, _len = this.compoundRules.length; i < _len; i++) {
|
||||
var rule = this.compoundRules[i];
|
||||
|
||||
for (var j = 0, _jlen = rule.length; j < _jlen; j++) {
|
||||
this.compoundRuleCodes[rule[j]] = [];
|
||||
}
|
||||
}
|
||||
|
||||
// If we add this ONLYINCOMPOUND flag to this.compoundRuleCodes, then _parseDIC
|
||||
// will do the work of saving the list of words that are compound-only.
|
||||
if ("ONLYINCOMPOUND" in this.flags) {
|
||||
this.compoundRuleCodes[this.flags.ONLYINCOMPOUND] = [];
|
||||
}
|
||||
|
||||
this.dictionaryTable = this._parseDIC(wordsData);
|
||||
|
||||
// Get rid of any codes from the compound rule codes that are never used
|
||||
// (or that were special regex characters). Not especially necessary...
|
||||
for (var i in this.compoundRuleCodes) {
|
||||
if (this.compoundRuleCodes[i].length == 0) {
|
||||
delete this.compoundRuleCodes[i];
|
||||
}
|
||||
}
|
||||
|
||||
// Build the full regular expressions for each compound rule.
|
||||
// I have a feeling (but no confirmation yet) that this method of
|
||||
// testing for compound words is probably slow.
|
||||
for (var i = 0, _len = this.compoundRules.length; i < _len; i++) {
|
||||
var ruleText = this.compoundRules[i];
|
||||
|
||||
var expressionText = "";
|
||||
|
||||
for (var j = 0, _jlen = ruleText.length; j < _jlen; j++) {
|
||||
var character = ruleText[j];
|
||||
|
||||
if (character in this.compoundRuleCodes) {
|
||||
expressionText += "(" + this.compoundRuleCodes[character].join("|") + ")";
|
||||
}
|
||||
else {
|
||||
expressionText += character;
|
||||
}
|
||||
}
|
||||
|
||||
this.compoundRules[i] = new RegExp(expressionText, "i");
|
||||
}
|
||||
}
|
||||
|
||||
return this;
|
||||
};
|
||||
|
||||
Typo.prototype = {
|
||||
/**
|
||||
* Loads a Typo instance from a hash of all of the Typo properties.
|
||||
*
|
||||
* @param object obj A hash of Typo properties, probably gotten from a JSON.parse(JSON.stringify(typo_instance)).
|
||||
*/
|
||||
|
||||
load : function (obj) {
|
||||
for (var i in obj) {
|
||||
this[i] = obj[i];
|
||||
}
|
||||
|
||||
return this;
|
||||
},
|
||||
|
||||
/**
|
||||
* Read the contents of a file.
|
||||
*
|
||||
* @param {String} path The path (relative) to the file.
|
||||
* @param {String} [charset="ISO8859-1"] The expected charset of the file
|
||||
* @returns string The file data.
|
||||
*/
|
||||
|
||||
_readFile : function (path, charset) {
|
||||
if (!charset) charset = "ISO8859-1";
|
||||
|
||||
var req = new XMLHttpRequest();
|
||||
req.open("GET", path, false);
|
||||
|
||||
if (req.overrideMimeType)
|
||||
req.overrideMimeType("text/plain; charset=" + charset);
|
||||
|
||||
req.send(null);
|
||||
|
||||
return req.responseText;
|
||||
},
|
||||
|
||||
/**
|
||||
* Parse the rules out from a .aff file.
|
||||
*
|
||||
* @param {String} data The contents of the affix file.
|
||||
* @returns object The rules from the file.
|
||||
*/
|
||||
|
||||
_parseAFF : function (data) {
|
||||
var rules = {};
|
||||
|
||||
// Remove comment lines
|
||||
data = this._removeAffixComments(data);
|
||||
|
||||
var lines = data.split("\n");
|
||||
|
||||
for (var i = 0, _len = lines.length; i < _len; i++) {
|
||||
var line = lines[i];
|
||||
|
||||
var definitionParts = line.split(/\s+/);
|
||||
|
||||
var ruleType = definitionParts[0];
|
||||
|
||||
if (ruleType == "PFX" || ruleType == "SFX") {
|
||||
var ruleCode = definitionParts[1];
|
||||
var combineable = definitionParts[2];
|
||||
var numEntries = parseInt(definitionParts[3], 10);
|
||||
|
||||
var entries = [];
|
||||
|
||||
for (var j = i + 1, _jlen = i + 1 + numEntries; j < _jlen; j++) {
|
||||
var line = lines[j];
|
||||
|
||||
var lineParts = line.split(/\s+/);
|
||||
var charactersToRemove = lineParts[2];
|
||||
|
||||
var additionParts = lineParts[3].split("/");
|
||||
|
||||
var charactersToAdd = additionParts[0];
|
||||
if (charactersToAdd === "0") charactersToAdd = "";
|
||||
|
||||
var continuationClasses = this.parseRuleCodes(additionParts[1]);
|
||||
|
||||
var regexToMatch = lineParts[4];
|
||||
|
||||
var entry = {};
|
||||
entry.add = charactersToAdd;
|
||||
|
||||
if (continuationClasses.length > 0) entry.continuationClasses = continuationClasses;
|
||||
|
||||
if (regexToMatch !== ".") {
|
||||
if (ruleType === "SFX") {
|
||||
entry.match = new RegExp(regexToMatch + "$");
|
||||
}
|
||||
else {
|
||||
entry.match = new RegExp("^" + regexToMatch);
|
||||
}
|
||||
}
|
||||
|
||||
if (charactersToRemove != "0") {
|
||||
if (ruleType === "SFX") {
|
||||
entry.remove = new RegExp(charactersToRemove + "$");
|
||||
}
|
||||
else {
|
||||
entry.remove = charactersToRemove;
|
||||
}
|
||||
}
|
||||
|
||||
entries.push(entry);
|
||||
}
|
||||
|
||||
rules[ruleCode] = { "type" : ruleType, "combineable" : (combineable == "Y"), "entries" : entries };
|
||||
|
||||
i += numEntries;
|
||||
}
|
||||
else if (ruleType === "COMPOUNDRULE") {
|
||||
var numEntries = parseInt(definitionParts[1], 10);
|
||||
|
||||
for (var j = i + 1, _jlen = i + 1 + numEntries; j < _jlen; j++) {
|
||||
var line = lines[j];
|
||||
|
||||
var lineParts = line.split(/\s+/);
|
||||
this.compoundRules.push(lineParts[1]);
|
||||
}
|
||||
|
||||
i += numEntries;
|
||||
}
|
||||
else if (ruleType === "REP") {
|
||||
var lineParts = line.split(/\s+/);
|
||||
|
||||
if (lineParts.length === 3) {
|
||||
this.replacementTable.push([ lineParts[1], lineParts[2] ]);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// ONLYINCOMPOUND
|
||||
// COMPOUNDMIN
|
||||
// FLAG
|
||||
// KEEPCASE
|
||||
// NEEDAFFIX
|
||||
|
||||
this.flags[ruleType] = definitionParts[1];
|
||||
}
|
||||
}
|
||||
|
||||
return rules;
|
||||
},
|
||||
|
||||
/**
|
||||
* Removes comment lines and then cleans up blank lines and trailing whitespace.
|
||||
*
|
||||
* @param {String} data The data from an affix file.
|
||||
* @return {String} The cleaned-up data.
|
||||
*/
|
||||
|
||||
_removeAffixComments : function (data) {
|
||||
// Remove comments
|
||||
data = data.replace(/#.*$/mg, "");
|
||||
|
||||
// Trim each line
|
||||
data = data.replace(/^\s\s*/m, '').replace(/\s\s*$/m, '');
|
||||
|
||||
// Remove blank lines.
|
||||
data = data.replace(/\n{2,}/g, "\n");
|
||||
|
||||
// Trim the entire string
|
||||
data = data.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
|
||||
|
||||
return data;
|
||||
},
|
||||
|
||||
/**
|
||||
* Parses the words out from the .dic file.
|
||||
*
|
||||
* @param {String} data The data from the dictionary file.
|
||||
* @returns object The lookup table containing all of the words and
|
||||
* word forms from the dictionary.
|
||||
*/
|
||||
|
||||
_parseDIC : function (data) {
|
||||
data = this._removeDicComments(data);
|
||||
|
||||
var lines = data.split("\n");
|
||||
var dictionaryTable = {};
|
||||
|
||||
function addWord(word, rules) {
|
||||
// Some dictionaries will list the same word multiple times with different rule sets.
|
||||
if (!(word in dictionaryTable) || typeof dictionaryTable[word] != 'object') {
|
||||
dictionaryTable[word] = [];
|
||||
}
|
||||
|
||||
dictionaryTable[word].push(rules);
|
||||
}
|
||||
|
||||
// The first line is the number of words in the dictionary.
|
||||
for (var i = 1, _len = lines.length; i < _len; i++) {
|
||||
var line = lines[i];
|
||||
|
||||
var parts = line.split("/", 2);
|
||||
|
||||
var word = parts[0];
|
||||
|
||||
// Now for each affix rule, generate that form of the word.
|
||||
if (parts.length > 1) {
|
||||
var ruleCodesArray = this.parseRuleCodes(parts[1]);
|
||||
|
||||
// Save the ruleCodes for compound word situations.
|
||||
if (!("NEEDAFFIX" in this.flags) || ruleCodesArray.indexOf(this.flags.NEEDAFFIX) == -1) {
|
||||
addWord(word, ruleCodesArray);
|
||||
}
|
||||
|
||||
for (var j = 0, _jlen = ruleCodesArray.length; j < _jlen; j++) {
|
||||
var code = ruleCodesArray[j];
|
||||
|
||||
var rule = this.rules[code];
|
||||
|
||||
if (rule) {
|
||||
var newWords = this._applyRule(word, rule);
|
||||
|
||||
for (var ii = 0, _iilen = newWords.length; ii < _iilen; ii++) {
|
||||
var newWord = newWords[ii];
|
||||
|
||||
addWord(newWord, []);
|
||||
|
||||
if (rule.combineable) {
|
||||
for (var k = j + 1; k < _jlen; k++) {
|
||||
var combineCode = ruleCodesArray[k];
|
||||
|
||||
var combineRule = this.rules[combineCode];
|
||||
|
||||
if (combineRule) {
|
||||
if (combineRule.combineable && (rule.type != combineRule.type)) {
|
||||
var otherNewWords = this._applyRule(newWord, combineRule);
|
||||
|
||||
for (var iii = 0, _iiilen = otherNewWords.length; iii < _iiilen; iii++) {
|
||||
var otherNewWord = otherNewWords[iii];
|
||||
addWord(otherNewWord, []);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (code in this.compoundRuleCodes) {
|
||||
this.compoundRuleCodes[code].push(word);
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
addWord(word.trim(), []);
|
||||
}
|
||||
}
|
||||
|
||||
return dictionaryTable;
|
||||
},
|
||||
|
||||
|
||||
/**
|
||||
* Removes comment lines and then cleans up blank lines and trailing whitespace.
|
||||
*
|
||||
* @param {String} data The data from a .dic file.
|
||||
* @return {String} The cleaned-up data.
|
||||
*/
|
||||
|
||||
_removeDicComments : function (data) {
|
||||
// I can't find any official documentation on it, but at least the de_DE
|
||||
// dictionary uses tab-indented lines as comments.
|
||||
|
||||
// Remove comments
|
||||
data = data.replace(/^\t.*$/mg, "");
|
||||
|
||||
return data;
|
||||
|
||||
// Trim each line
|
||||
data = data.replace(/^\s\s*/m, '').replace(/\s\s*$/m, '');
|
||||
|
||||
// Remove blank lines.
|
||||
data = data.replace(/\n{2,}/g, "\n");
|
||||
|
||||
// Trim the entire string
|
||||
data = data.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
|
||||
|
||||
return data;
|
||||
},
|
||||
|
||||
parseRuleCodes : function (textCodes) {
|
||||
if (!textCodes) {
|
||||
return [];
|
||||
}
|
||||
else if (!("FLAG" in this.flags)) {
|
||||
return textCodes.split("");
|
||||
}
|
||||
else if (this.flags.FLAG === "long") {
|
||||
var flags = [];
|
||||
|
||||
for (var i = 0, _len = textCodes.length; i < _len; i += 2) {
|
||||
flags.push(textCodes.substr(i, 2));
|
||||
}
|
||||
|
||||
return flags;
|
||||
}
|
||||
else if (this.flags.FLAG === "num") {
|
||||
return textCodes.split(",");
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* Applies an affix rule to a word.
|
||||
*
|
||||
* @param {String} word The base word.
|
||||
* @param {Object} rule The affix rule.
|
||||
* @returns {String[]} The new words generated by the rule.
|
||||
*/
|
||||
|
||||
_applyRule : function (word, rule) {
|
||||
var entries = rule.entries;
|
||||
var newWords = [];
|
||||
|
||||
for (var i = 0, _len = entries.length; i < _len; i++) {
|
||||
var entry = entries[i];
|
||||
|
||||
if (!entry.match || word.match(entry.match)) {
|
||||
var newWord = word;
|
||||
|
||||
if (entry.remove) {
|
||||
newWord = newWord.replace(entry.remove, "");
|
||||
}
|
||||
|
||||
if (rule.type === "SFX") {
|
||||
newWord = newWord + entry.add;
|
||||
}
|
||||
else {
|
||||
newWord = entry.add + newWord;
|
||||
}
|
||||
|
||||
newWords.push(newWord);
|
||||
|
||||
if ("continuationClasses" in entry) {
|
||||
for (var j = 0, _jlen = entry.continuationClasses.length; j < _jlen; j++) {
|
||||
var continuationRule = this.rules[entry.continuationClasses[j]];
|
||||
|
||||
if (continuationRule) {
|
||||
newWords = newWords.concat(this._applyRule(newWord, continuationRule));
|
||||
}
|
||||
/*
|
||||
else {
|
||||
// This shouldn't happen, but it does, at least in the de_DE dictionary.
|
||||
// I think the author mistakenly supplied lower-case rule codes instead
|
||||
// of upper-case.
|
||||
}
|
||||
*/
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return newWords;
|
||||
},
|
||||
|
||||
/**
|
||||
* Checks whether a word or a capitalization variant exists in the current dictionary.
|
||||
* The word is trimmed and several variations of capitalizations are checked.
|
||||
* If you want to check a word without any changes made to it, call checkExact()
|
||||
*
|
||||
* @see http://blog.stevenlevithan.com/archives/faster-trim-javascript re:trimming function
|
||||
*
|
||||
* @param {String} aWord The word to check.
|
||||
* @returns {Boolean}
|
||||
*/
|
||||
|
||||
check : function (aWord) {
|
||||
// Remove leading and trailing whitespace
|
||||
var trimmedWord = aWord.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
|
||||
|
||||
if (this.checkExact(trimmedWord)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// The exact word is not in the dictionary.
|
||||
if (trimmedWord.toUpperCase() === trimmedWord) {
|
||||
// The word was supplied in all uppercase.
|
||||
// Check for a capitalized form of the word.
|
||||
var capitalizedWord = trimmedWord[0] + trimmedWord.substring(1).toLowerCase();
|
||||
|
||||
if (this.hasFlag(capitalizedWord, "KEEPCASE")) {
|
||||
// Capitalization variants are not allowed for this word.
|
||||
return false;
|
||||
}
|
||||
|
||||
if (this.checkExact(capitalizedWord)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
var lowercaseWord = trimmedWord.toLowerCase();
|
||||
|
||||
if (lowercaseWord !== trimmedWord) {
|
||||
if (this.hasFlag(lowercaseWord, "KEEPCASE")) {
|
||||
// Capitalization variants are not allowed for this word.
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check for a lowercase form
|
||||
if (this.checkExact(lowercaseWord)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
},
|
||||
|
||||
/**
|
||||
* Checks whether a word exists in the current dictionary.
|
||||
*
|
||||
* @param {String} word The word to check.
|
||||
* @returns {Boolean}
|
||||
*/
|
||||
|
||||
checkExact : function (word) {
|
||||
var ruleCodes = this.dictionaryTable[word];
|
||||
|
||||
if (typeof ruleCodes === 'undefined') {
|
||||
// Check if this might be a compound word.
|
||||
if ("COMPOUNDMIN" in this.flags && word.length >= this.flags.COMPOUNDMIN) {
|
||||
for (var i = 0, _len = this.compoundRules.length; i < _len; i++) {
|
||||
if (word.match(this.compoundRules[i])) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
else {
|
||||
for (var i = 0, _len = ruleCodes.length; i < _len; i++) {
|
||||
if (!this.hasFlag(word, "ONLYINCOMPOUND", ruleCodes[i])) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* Looks up whether a given word is flagged with a given flag.
|
||||
*
|
||||
* @param {String} word The word in question.
|
||||
* @param {String} flag The flag in question.
|
||||
* @return {Boolean}
|
||||
*/
|
||||
|
||||
hasFlag : function (word, flag, wordFlags) {
|
||||
if (flag in this.flags) {
|
||||
if (typeof wordFlags === 'undefined') {
|
||||
var wordFlags = Array.prototype.concat.apply([], this.dictionaryTable[word]);
|
||||
}
|
||||
|
||||
if (wordFlags && wordFlags.indexOf(this.flags[flag]) !== -1) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
},
|
||||
|
||||
/**
|
||||
* Returns a list of suggestions for a misspelled word.
|
||||
*
|
||||
* @see http://www.norvig.com/spell-correct.html for the basis of this suggestor.
|
||||
* This suggestor is primitive, but it works.
|
||||
*
|
||||
* @param {String} word The misspelling.
|
||||
* @param {Number} [limit=5] The maximum number of suggestions to return.
|
||||
* @returns {String[]} The array of suggestions.
|
||||
*/
|
||||
|
||||
alphabet : "",
|
||||
|
||||
suggest : function (word, limit) {
|
||||
if (!limit) limit = 5;
|
||||
|
||||
if (this.check(word)) return [];
|
||||
|
||||
// Check the replacement table.
|
||||
for (var i = 0, _len = this.replacementTable.length; i < _len; i++) {
|
||||
var replacementEntry = this.replacementTable[i];
|
||||
|
||||
if (word.indexOf(replacementEntry[0]) !== -1) {
|
||||
var correctedWord = word.replace(replacementEntry[0], replacementEntry[1]);
|
||||
|
||||
if (this.check(correctedWord)) {
|
||||
return [ correctedWord ];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var self = this;
|
||||
self.alphabet = "abcdefghijklmnopqrstuvwxyz";
|
||||
|
||||
/*
|
||||
if (!self.alphabet) {
|
||||
// Use the alphabet as implicitly defined by the words in the dictionary.
|
||||
var alphaHash = {};
|
||||
|
||||
for (var i in self.dictionaryTable) {
|
||||
for (var j = 0, _len = i.length; j < _len; j++) {
|
||||
alphaHash[i[j]] = true;
|
||||
}
|
||||
}
|
||||
|
||||
for (var i in alphaHash) {
|
||||
self.alphabet += i;
|
||||
}
|
||||
|
||||
var alphaArray = self.alphabet.split("");
|
||||
alphaArray.sort();
|
||||
self.alphabet = alphaArray.join("");
|
||||
}
|
||||
*/
|
||||
|
||||
function edits1(words) {
|
||||
var rv = [];
|
||||
|
||||
for (var ii = 0, _iilen = words.length; ii < _iilen; ii++) {
|
||||
var word = words[ii];
|
||||
|
||||
var splits = [];
|
||||
|
||||
for (var i = 0, _len = word.length + 1; i < _len; i++) {
|
||||
splits.push([ word.substring(0, i), word.substring(i, word.length) ]);
|
||||
}
|
||||
|
||||
var deletes = [];
|
||||
|
||||
for (var i = 0, _len = splits.length; i < _len; i++) {
|
||||
var s = splits[i];
|
||||
|
||||
if (s[1]) {
|
||||
deletes.push(s[0] + s[1].substring(1));
|
||||
}
|
||||
}
|
||||
|
||||
var transposes = [];
|
||||
|
||||
for (var i = 0, _len = splits.length; i < _len; i++) {
|
||||
var s = splits[i];
|
||||
|
||||
if (s[1].length > 1) {
|
||||
transposes.push(s[0] + s[1][1] + s[1][0] + s[1].substring(2));
|
||||
}
|
||||
}
|
||||
|
||||
var replaces = [];
|
||||
|
||||
for (var i = 0, _len = splits.length; i < _len; i++) {
|
||||
var s = splits[i];
|
||||
|
||||
if (s[1]) {
|
||||
for (var j = 0, _jlen = self.alphabet.length; j < _jlen; j++) {
|
||||
replaces.push(s[0] + self.alphabet[j] + s[1].substring(1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var inserts = [];
|
||||
|
||||
for (var i = 0, _len = splits.length; i < _len; i++) {
|
||||
var s = splits[i];
|
||||
|
||||
if (s[1]) {
|
||||
for (var j = 0, _jlen = self.alphabet.length; j < _jlen; j++) {
|
||||
inserts.push(s[0] + self.alphabet[j] + s[1]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rv = rv.concat(deletes);
|
||||
rv = rv.concat(transposes);
|
||||
rv = rv.concat(replaces);
|
||||
rv = rv.concat(inserts);
|
||||
}
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
function known(words) {
|
||||
var rv = [];
|
||||
|
||||
for (var i = 0; i < words.length; i++) {
|
||||
if (self.check(words[i])) {
|
||||
rv.push(words[i]);
|
||||
}
|
||||
}
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
function correct(word) {
|
||||
// Get the edit-distance-1 and edit-distance-2 forms of this word.
|
||||
var ed1 = edits1([word]);
|
||||
var ed2 = edits1(ed1);
|
||||
|
||||
var corrections = known(ed1).concat(known(ed2));
|
||||
|
||||
// Sort the edits based on how many different ways they were created.
|
||||
var weighted_corrections = {};
|
||||
|
||||
for (var i = 0, _len = corrections.length; i < _len; i++) {
|
||||
if (!(corrections[i] in weighted_corrections)) {
|
||||
weighted_corrections[corrections[i]] = 1;
|
||||
}
|
||||
else {
|
||||
weighted_corrections[corrections[i]] += 1;
|
||||
}
|
||||
}
|
||||
|
||||
var sorted_corrections = [];
|
||||
|
||||
for (var i in weighted_corrections) {
|
||||
sorted_corrections.push([ i, weighted_corrections[i] ]);
|
||||
}
|
||||
|
||||
function sorter(a, b) {
|
||||
if (a[1] < b[1]) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
sorted_corrections.sort(sorter).reverse();
|
||||
|
||||
var rv = [];
|
||||
|
||||
for (var i = 0, _len = Math.min(limit, sorted_corrections.length); i < _len; i++) {
|
||||
if (!self.hasFlag(sorted_corrections[i][0], "NOSUGGEST")) {
|
||||
rv.push(sorted_corrections[i][0]);
|
||||
}
|
||||
}
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
return correct(word);
|
||||
}
|
||||
};
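// Usage sketch (not part of typo.js): affData/dicData are the raw .aff/.dic file contents,
// loaded separately as spell-checker.js does above.
//   var typo = new Typo("en_US", affData, dicData, { platform: "any" });
//   typo.check("color");      // true if the word (or a case variant) is in the dictionary
//   typo.suggest("speling");  // up to 5 ranked suggestions by default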