require 'cgi'
require 'uri'
class DText
  MENTION_REGEXP = /(?<=^| )@\S+/

  def self.u(string)
    CGI.escape(string)
  end

  def self.h(string)
    CGI.escapeHTML(string)
  end

  def self.quote(message, creator_name)
    stripped_body = DText.strip_blocks(message, "quote")
    "[quote]\n#{creator_name} said:\n\n#{stripped_body}\n[/quote]\n\n"
  end
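
  # Illustrative example (not part of the original source): strip_blocks drops
  # everything nested inside the given block tag and keeps the rest, so
  #   DText.strip_blocks("reply text [quote] quoted text [/quote]", "quote")
  # is expected to return "reply text".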
  def self.strip_blocks(string, tag)
    n = 0
    stripped = ""
    string = string.dup

    string.gsub!(/\s*\[#{tag}\](?!\])\s*/mi, "\n\n[#{tag}]\n\n")
    string.gsub!(/\s*\[\/#{tag}\]\s*/mi, "\n\n[/#{tag}]\n\n")
    string.gsub!(/(?:\r?\n){3,}/, "\n\n")
    string.strip!

    string.split(/\n{2}/).each do |block|
      case block
      when "[#{tag}]"
        n += 1
      when "[/#{tag}]"
        n -= 1
      else
        if n == 0
          stripped << "#{block}\n\n"
        end
      end
    end

    stripped.strip
  end
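
  # Illustrative example (not part of the original source): parse_inline
  # HTML-escapes the text and converts inline markup, so "[b]hi[/b] & [i]bye[/i]"
  # is expected to become "<strong>hi</strong> &amp; <em>bye</em>".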
  def self.parse_inline(str, options = {})
    str.gsub!(/&/, "&amp;")
    str.gsub!(/</, "&lt;")
    str.gsub!(/>/, "&gt;")
    str.gsub!(/\n/m, "<br>") unless options[:ignore_newlines]
    str.gsub!(/\[b\](.+?)\[\/b\]/i, '<strong>\1</strong>')
    str.gsub!(/\[i\](.+?)\[\/i\]/i, '<em>\1</em>')
    str.gsub!(/\[s\](.+?)\[\/s\]/i, '<s>\1</s>')
    str.gsub!(/\[u\](.+?)\[\/u\]/i, '<u>\1</u>')
    str.gsub!(/\[tn\](.+?)\[\/tn\]/i, '<p class="tn">\1</p>')
    str = parse_mentions(str, options)
    str = parse_links(str)
    str = parse_aliased_wiki_links(str)
    str = parse_wiki_links(str)
    str = parse_post_links(str)
    str = parse_id_links(str)
    str
  end

  def self.parse_mentions(str, options = {})
    return str if options[:disable_mentions]

    str.gsub!(MENTION_REGEXP) do |name|
      next name unless name =~ /[a-z0-9]/i

      if name =~ /([:;,.!?\)\]<>])$/
        name.chop!
        ch = $1
      else
        ch = ""
      end

      '<a href="/users?name=' + u(name[1..-1]) + '">' + name + '</a>' + ch
    end

    str
  end

  def self.parse_table_elements(str)
    str = parse_inline(str, :ignore_newlines => true)
    str.gsub!(/\[(\/?(?:tr|td|th|thead|tbody))\]/, '<\1>')
    str
  end

  def self.parse_links(str)
    str.gsub(/("[^"]+":(https?:\/\/|\/)[^\s\r\n<>]+|https?:\/\/[^\s\r\n<>]+|"[^"]+":\[(https?:\/\/|\/)[^\s\r\n<>\]]+\])+/) do |url|
      ch = ""

      if url =~ /^"([^"]+)":\[(.+)\]$/
        text = $1
        url = $2
      else
        if url =~ /^"([^"]+)":(.+)$/
          text = $1
          url = $2
        else
          text = url
        end

        if url =~ /([;,.!?\)\]<>])$/
          url.chop!
          ch = $1
        end
      end

      '<a href="' + url + '">' + text + '</a>' + ch
    end
  end

  def self.parse_aliased_wiki_links(str)
    str.gsub(/\[\[([^\|\]]+)\|([^\]]+)\]\]/m) do
      text = CGI.unescapeHTML($2)
      title = CGI.unescapeHTML($1).tr(" ", "_").downcase
      %{<a href="/wiki_pages/show_or_new?title=#{u(title)}">#{h(text)}</a>}
    end
  end

  def self.parse_wiki_links(str)
    str.gsub(/\[\[([^\]]+)\]\]/) do
      text = CGI.unescapeHTML($1)
      title = text.tr(" ", "_").downcase
      %{<a href="/wiki_pages/show_or_new?title=#{u(title)}">#{h(text)}</a>}
    end
  end

  def self.parse_post_links(str)
    str.gsub(/\{\{([^\}]+)\}\}/) do
      tags = CGI.unescapeHTML($1)
      %{<a href="/posts?tags=#{u(tags)}">#{h(tags)}</a>}
    end
  end

  def self.parse_id_links(str)
    str = str.gsub(/\bpost #(\d+)/i, %{<a href="/posts/\\1">post #\\1</a>})
    str = str.gsub(/\bforum #(\d+)/i, %{<a href="/forum_posts/\\1">forum #\\1</a>})
    str = str.gsub(/\btopic #(\d+)(?!\/p\d|\d)/i, %{<a href="/forum_topics/\\1">topic #\\1</a>})
    str = str.gsub(/\btopic #(\d+)\/p(\d+)/i, %{<a href="/forum_topics/\\1?page=\\2">topic #\\1/p\\2</a>})
    str = str.gsub(/\bcomment #(\d+)/i, %{<a href="/comments/\\1">comment #\\1</a>})
    str = str.gsub(/\bpool #(\d+)/i, %{<a href="/pools/\\1">pool #\\1</a>})
    str = str.gsub(/\buser #(\d+)/i, %{<a href="/users/\\1">user #\\1</a>})
    str = str.gsub(/\bartist #(\d+)/i, %{<a href="/artists/\\1">artist #\\1</a>})
    str = str.gsub(/\bissue #(\d+)/i, %{<a href="https://github.com/r888888888/danbooru/issues/\\1">issue #\\1</a>})
    str = str.gsub(/\bpixiv #(\d+)(?!\/p\d|\d)/i, %{<a href="http://www.pixiv.net/member_illust.php?mode=medium&illust_id=\\1">pixiv #\\1</a>})
    str = str.gsub(/\bpixiv #(\d+)\/p(\d+)/i, %{<a href="http://www.pixiv.net/member_illust.php?mode=manga_big&illust_id=\\1&page=\\2">pixiv #\\1/p\\2</a>})
  end

  def self.parse_list(str, options = {})
    html = ""
    current_item = ""
    layout = []
    nest = 0

    str.split(/\n/).each do |line|
      if line =~ /^\s*(\*+) (.+)/
        if nest > 0
          html += "<li>#{current_item}</li>"
" end nest = $1.size current_item = parse_inline($2) else current_item += parse_inline(line) end if nest > layout.size html += "" end when "[/quote]" if options[:inline] "" elsif stack.last == "blockquote" stack.pop '' else "" end when "[spoiler]" stack << "spoiler" '
'
      when /\[\/code\](?!\])/
        flags[:code] = false
        if stack.last == "pre"
          stack.pop
          "</pre>"
        else
          ""
        end

      when /\[expand(?:\=([^\]]*))?\](?!\])/
        stack << "expandable"
        expand_html = '<div class="expandable"><div class="expandable-header">'
        if $1
          expand_html << "<span>#{h($1)}</span>"
        end
        expand_html << '<input type="button" value="Show" class="expandable-button"></div>'
        expand_html << '<div class="expandable-content">'
        expand_html

      when "[/expand]"
        if stack.last == "expandable"
          stack.pop
          '</div></div>'
        else
          ""
        end

      when "[table]"
        flags[:table] = true
        stack << "table"
        '<table class="striped">'

      when "[/table]"
        flags[:table] = false
        if stack.last == "table"
          stack.pop
          '</table>'
        else
          ""
        end

      else
        if flags[:code]
          CGI.escape_html(block) + "\n\n"
        elsif flags[:table]
          parse_table_elements(block)
        else
          '<p>' + parse_inline(block) + '</p>'
        end
      end
    end

    # Close any block-level tags that were left open.
    stack.reverse.each do |tag|
      if tag == "blockquote"
        html << "</blockquote>"
      elsif tag == "div"
        html << "</div>"
      elsif tag == "pre"
        html << "</pre>"
      elsif tag == "spoiler"
        html << "</div>"
      elsif tag == "expandable"
        html << "</div></div>"
      elsif tag == "table"
        html << "</table>"
      end
    end

    html.join("").html_safe
  end

  def self.parse_strip(s)
    strip(s)
  end

  # Remove all DText markup, leaving plain text.
  def self.strip(s)
    return "" if s.blank?

    s.gsub!(/[\r\n]+/m, " ")
    s.gsub!(/\[\/?(?:b|i|s|u|tn|tr|td|th|thead|tbody|quote|code|spoilers|spoiler|expand|table)\]/, "")
    s.gsub!(/\[\[([^\|\]]+)\|([^\]]+)\]\]/m, '\2')
    s.gsub!(/\[\[([^\]]+)\]\]/, '\1')
    s.gsub!(/\{\{([^\}]+)\}\}/, '\1')
    s.gsub!(/("[^"]+":(https?:\/\/|\/)[^\s\r\n<>]+|https?:\/\/[^\s\r\n<>]+|"[^"]+":\[(https?:\/\/|\/)[^\s\r\n<>\]]+\])+/) do |url|
      if url =~ /^"([^"]+)":\[(.+)\]$/
        $1
      elsif url =~ /^"([^"]+)":(.+)$/
        $1
      else
        url
      end
    end
    s
  end

  # extract the first paragraph `needle` occurs in.
  def self.excerpt(dtext, needle)
    dtext = dtext.gsub(/\r\n|\r|\n/, "\n")
    excerpt = ActionController::Base.helpers.excerpt(dtext, needle, separator: "\n\n", radius: 1, omission: "")
    excerpt
  end
end
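
# Illustrative usage (not part of the original source; assumes a Rails
# environment for String#blank? and String#html_safe):
#
#   DText.parse("[b]bold[/b] and a link to {{touhou}}")
#   # expected to produce something like:
#   #   <p><strong>bold</strong> and a link to <a href="/posts?tags=touhou">touhou</a></p>
#
#   DText.strip("[b]bold[/b] and {{touhou}}")
#   # => "bold and touhou"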