Merge pull request #1547 from Cimbali/R

Simon Legner committed 4 years ago, via GitHub
commit e9c854c9e9

@@ -686,6 +686,11 @@ credits = [
    '2012-2018 The Qt Company Ltd',
    'GFDL',
    'https://doc.qt.io/qt-5/licensing.html'
  ], [
    'R',
    '1999–2012 R Foundation for Statistical Computing',
    'GPL',
    'https://svn.r-project.org/R/trunk/COPYING'
  ], [
    'Ramda',
    '2013-2020 Scott Sauyet and Michael Hurley',

@@ -0,0 +1,61 @@
module Docs
  class R
    class CleanHtmlFilter < Filter
      def call
        slug_parts = slug.split('/')

        if root_page?
          css('a[href$="/00index"]').each do |pkg|
            pkg['href'] = "/r-#{pkg['href'].split('/')[1]}/"
          end

        elsif slug_parts[0] == 'library'
          # Prefix the title with the topic name (slug is library/<pkg>/html/<topic>)
          title = at_css('h2')
          title.inner_html = "<code>#{slug_parts[3]}</code> #{title.content}"

          summary = at_css('table[summary]')
          summary.remove if summary

          css('hr ~ *, hr').remove

        elsif slug_parts[-2] == 'manual'
          css('table.menu, div.header, hr, h2.contents-heading, div.contents, table.index-cp, table.index-vr, table[summary]').remove

          css('h2').each do |node|
            node.remove if node.content.end_with? ' index'
          end

          css('span[id] + h1, span[id] + h2, span[id] + h3, span[id] + h4, span[id] + h5, span[id] + h6').each do |node|
            # We need the first of the series of spans with ids;
            # its id goes onto the heading and the spans are removed.
            span = node.previous_element
            while span.previous
              prev = span.previous_element
              break unless prev.name == 'span' and prev['id']
              span.remove
              span = prev
            end
            node['id'] = span['id']
            span.remove
          end

          css('div.example').each do |node|
            node.replace(node.children)
          end

          css('h1 + h1').remove

          css('.footnote h5').each do |node|
            anchor = node.at_css('a[id]')
            footnote = node.next_sibling
            footnote.inner_html = "<strong>#{anchor.text}</strong>&nbsp;#{footnote.inner_html}"
            footnote['id'] = anchor['id']
            node.remove
          end
        end

        doc
      end
    end
  end
end
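
The span-id loop above is the least obvious part of this filter. Here is a minimal standalone sketch (not part of the PR, assuming Nokogiri and a texinfo-style run of empty anchor spans before a heading) of the transformation it performs:

# Illustration only; mirrors the span-id handling in clean_html.rb
require 'nokogiri'

doc = Nokogiri::HTML('<body><span id="Intro"></span><span id="Introduction-1"></span><h2>1 Introduction</h2></body>')

doc.css('span[id] + h2').each do |node|
  span = node.previous_element
  # Walk back to the first span of the run, dropping the later ones
  while span.previous_element && span.previous_element.name == 'span' && span.previous_element['id']
    prev = span.previous_element
    span.remove
    span = prev
  end
  node['id'] = span['id']   # the heading inherits the first span's id
  span.remove
end

puts doc.at_css('h2')       # => <h2 id="Intro">1 Introduction</h2>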

@@ -0,0 +1,84 @@
module Docs
  class R
    class EntriesFilter < Docs::EntriesFilter
      PKG_INDEX_ENTRIES = Hash.new []

      def call
        if slug_parts[-1] == '00Index'
          dir = File.dirname(result[:subpath])
          css('tr a').each do |link|
            PKG_INDEX_ENTRIES[link['href']] += [link.text]
            next if link['href'] == link.text
            context[:replace_paths][File.join(dir, "#{link.text}.html")] = File.join(dir, "#{link['href']}.html")
          end
        end
        super
      end

      def slug_parts
        slug.split('/')
      end

      def is_package?
        slug_parts[0] == 'library'
      end

      def is_manual?
        slug_parts[1] == 'manual'
      end

      def get_name
        return at_css('h2').content if is_package?

        title = at_css('h1.settitle')
        title ? title.content : at_css('h1, h2').content
      end

      def get_type
        return slug_parts[1] if is_package?
        return at_css('h1.settitle').content if is_manual?
      end

      def include_default_entry?
        is_package? and slug_parts[-1] != '00Index'
      end

      # Strip leading section numbers, e.g. "6.4 Attributes" -> "Attributes",
      # "Appendix A A sample session" -> "A sample session"; return nil for
      # sections that should not become entries.
      def manual_section(node)
        title = node.content.sub(/^((Appendix )?[A-Z]|[0-9]+)(\.[0-9]+)* /, '')
        title unless ['References', 'Preface', 'Acknowledgements'].include?(title) or title.end_with?(' index')
      end

      def additional_entries
        if is_package? and slug_parts[-1] != '00Index'
          page = slug_parts[-1]
          return [page] + PKG_INDEX_ENTRIES.fetch(page, [])
        end

        return [] unless is_manual?

        entries = []
        unless slug_parts[-1].downcase == 'r-intro'
          # Single top-level category
          css('div.contents > ul a').each do |link|
            link_name = manual_section(link)
            entries << [link_name, link['href'].split('#')[1], name] unless link_name.nil?
          end
        else
          # Split the 1st level of the manual into different categories
          css('div.contents > ul > li').each do |node|
            type = manual_section(node.at_css('a'))
            next if type.nil?
            node.css('> ul a').each do |link|
              link_name = link.content.sub(/^[0-9A-Z]+(\.[0-9]+)* /, '')
              entries << [link_name, link['href'].split('#')[1], type]
            end
          end
        end
        entries
      end

      private
    end
  end
end
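
For reference, a minimal sketch (not part of the PR) of what the 00Index pass above accumulates: every index row adds an entry name for its target file, and rows whose link text differs from the target produce a path redirect. The row data below is made up, modelled on the cov.mve/cov.rob example mentioned in the scraper's replace_paths comment.

# Illustration only
PKG_INDEX_ENTRIES = Hash.new []

dir = 'library/MASS/html'
replace_paths = {}

# Two index rows pointing at the same target file, one under an alias name
rows = [
  { 'href' => 'cov.rob', 'text' => 'cov.mve' },
  { 'href' => 'cov.rob', 'text' => 'cov.rob' }
]

rows.each do |link|
  PKG_INDEX_ENTRIES[link['href']] += [link['text']]
  next if link['href'] == link['text']
  replace_paths[File.join(dir, "#{link['text']}.html")] = File.join(dir, "#{link['href']}.html")
end

p PKG_INDEX_ENTRIES['cov.rob']
# => ["cov.mve", "cov.rob"]
p replace_paths
# => {"library/MASS/html/cov.mve.html"=>"library/MASS/html/cov.rob.html"}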

@@ -0,0 +1,53 @@
module Docs
  class R < FileScraper
    self.name = 'R'
    self.slug = 'r'
    self.type = 'simple'
    self.release = '4.1.0'
    self.links = {
      home: 'https://www.r-project.org/',
      code: 'https://svn.r-project.org/R/'
    }

    self.root_path = 'doc/html/packages.html'

    html_filters.push 'r/entries', 'r/clean_html'

    options[:skip_links] = false

    options[:attribution] = <<-HTML
      Copyright (&copy;) 1999&ndash;2012 R Foundation for Statistical Computing.<br>
      Licensed under the <a href="https://www.gnu.org/copyleft/gpl.html">GNU General Public License</a>.
    HTML

    # Never want these
    options[:skip_patterns] = [
      /\/DESCRIPTION$/,
      /\/NEWS(\.[^\/]*)?$/,
      /\/doc\/index\.html$/,
      /\/demo$/,
      /\.pdf$/
    ]

    options[:replace_paths] = {
      ## We want to fix links like so, but only if the targets don't exist:
      # 'library/MASS/html/cov.mve.html' => 'library/MASS/html/cov.rob.html'
      ## Paths for target packages or keywords that do not have their own file
      ## are generated in the entries filter from 00Index.html files
    }

    options[:skip] = %w(
      doc/html/packages-head-utf8.html
      doc/html/SearchOn.html
      doc/html/Search.html
      doc/html/UserManuals.html
      doc/html/faq.html
      doc/manual/R-FAQ.html
      doc/manual/R-admin.html
      doc/manual/R-exts.html
      doc/manual/R-ints.html
      doc/manual/R-lang.html
    )
  end
end
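
As a quick sanity check (not part of the PR), the skip_patterns above can be exercised against a few example paths; the paths are invented here, but show which kinds of files get dropped:

# Illustration only
SKIP_PATTERNS = [
  /\/DESCRIPTION$/,
  /\/NEWS(\.[^\/]*)?$/,
  /\/doc\/index\.html$/,
  /\/demo$/,
  /\.pdf$/
]

%w(
  library/MASS/DESCRIPTION
  library/MASS/NEWS.md
  library/MASS/doc/index.html
  library/stats/html/lm.html
  doc/manual/R-intro.pdf
).each do |path|
  verdict = SKIP_PATTERNS.any? { |re| path =~ re } ? 'skip' : 'keep'
  puts "#{verdict}  #{path}"
end
# skip  library/MASS/DESCRIPTION
# skip  library/MASS/NEWS.md
# skip  library/MASS/doc/index.html
# keep  library/stats/html/lm.html
# skip  doc/manual/R-intro.pdf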

Binary file added: 716 B

Binary file added: 1.4 KiB

@@ -0,0 +1 @@
https://svn.r-project.org/R/trunk/doc/html/Rlogo.svg