Files
danbooru/app/models/amazon_backup.rb
r888888888 abce4d2551 Raise error on unpermitted params.
Fail loudly if we forget to whitelist a param instead of silently
ignoring it.
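
For reference, this behavior is controlled by a single Rails setting; a minimal
sketch of the kind of change involved (the exact config file it lives in is an
assumption):

    # config/application.rb (or an initializer)
    # Raise ActionController::UnpermittedParameters instead of silently
    # dropping params that were not whitelisted with `permit`.
    config.action_controller.action_on_unpermitted_parameters = :raise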

misc models: convert to strong params.

artist commentaries: convert to strong params.

* Disallow changing or setting post_id to a nonexistent post.
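
One way this kind of rule is commonly enforced at the model level (an
illustrative sketch, not necessarily how this commit does it; assumes the
Rails 5 belongs_to-required default is enabled):

    class ArtistCommentary < ApplicationRecord
      # With belongs_to required (the Rails 5 default), saving a commentary
      # whose post_id points at a nonexistent post fails validation.
      belongs_to :post
    end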

artists: convert to strong params.

* Disallow setting `is_banned` in create/update actions. Changing it
  this way instead of with the ban/unban actions would leave the artist in
  a partially banned state.

bans: convert to strong params.

* Disallow changing the user_id after the ban has been created.
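
A sketch of how per-action permit lists typically express this (method and
attribute names here are illustrative, not the exact Danbooru code):

    # app/controllers/bans_controller.rb
    def create_params
      params.require(:ban).permit(:user_id, :reason, :duration)
    end

    def update_params
      # user_id is deliberately omitted: the banned user cannot be changed
      # once the ban exists.
      params.require(:ban).permit(:reason, :duration)
    end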

comments: convert to strong params.

favorite groups: convert to strong params.

news updates: convert to strong params.

post appeals: convert to strong params.

post flags: convert to strong params.

* Disallow users from setting the `is_deleted` / `is_resolved` flags.

ip bans: convert to strong params.

user feedbacks: convert to strong params.

* Disallow users from setting `disable_dmail_notification` when creating feedbacks.
* Disallow changing the user_id after the feedback has been created.

notes: convert to strong params.

wiki pages: convert to strong params.

* Also fix non-Builders being able to delete wiki pages.

saved searches: convert to strong params.

pools: convert to strong params.

* Disallow setting `post_count` or `is_deleted` in create/update actions.

janitor trials: convert to strong params.

post disapprovals: convert to strong params.

* Factor out quick-mod bar to shared partial.
* Fix quick-mod bar to use `Post#is_approvable?` to determine visibility
  of Approve button.

dmail filters: convert to strong params.

password resets: convert to strong params.

user name change requests: convert to strong params.

posts: convert to strong params.

users: convert to strong params.

* Disallow setting password_hash, last_logged_in_at, last_forum_read_at,
  has_mail, and dmail_filter_attributes[user_id] (see the sketch below).

* Remove initialize_default_image_size (dead code).
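
A sketch of the nested-attributes side of that permit list (attribute names
abbreviated and illustrative, not the full Danbooru list):

    # app/controllers/users_controller.rb
    def user_params
      params.require(:user).permit(
        :name, :email, :comment_threshold,
        # The nested dmail filter may be edited, but user_id is not
        # permitted, so it cannot be pointed at another user's account.
        dmail_filter_attributes: [:id, :words]
      )
    end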

uploads: convert to strong params.

* Remove `initialize_status` because status already defaults to pending
  in the database.

tag aliases/implications: convert to strong params.

tags: convert to strong params.

forum posts: convert to strong params.

* Disallow changing the topic_id after creating the post (see the sketch below).
* Disallow setting is_deleted (destroy/undelete actions should be used instead).
* Remove is_sticky / is_locked (nonexistent attributes).
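
A sketch of the create-only topic_id rule expressed in a single params method
(names are illustrative):

    # app/controllers/forum_posts_controller.rb
    def forum_post_params(context)
      permitted = [:body]
      # topic_id may only be set when the post is first created; is_deleted
      # is never mass-assignable (use the destroy/undelete actions instead).
      permitted << :topic_id if context == :create
      params.require(:forum_post).permit(*permitted)
    end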

forum topics: convert to strong params.

* Merge https://github.com/evazion/danbooru/tree/wip-rails-5.1.
* Lock the pg gem to 0.21 (1.0.0 is incompatible with Rails 5.1.4).
* Switch to factory_bot and update all references.

Co-authored-by: r888888888 <r888888888@gmail.com>
Co-authored-by: evazion <noizave@gmail.com>

# donmai.us specific

require 'base64'
require 'digest/md5'

class AmazonBackup < ApplicationRecord
  def self.last_id
    first.last_id
  end

  def self.update_id(new_id)
    first.update_column(:last_id, new_id)
  end

  def self.restore_from_glacier(min_id = 1_431_595, max_id = 2_000_000)
    credentials = Aws::Credentials.new(Danbooru.config.aws_access_key_id, Danbooru.config.aws_secret_access_key)
    Aws.config.update({
      region: "us-east-1",
      credentials: credentials
    })
    client = Aws::S3::Client.new
    bucket = Danbooru.config.aws_s3_bucket_name

    # Issue a bulk Glacier restore request for a single key, handling the
    # errors S3 returns for objects in various states.
    f = lambda do |key|
      begin
        client.restore_object(
          bucket: bucket,
          key: key,
          restore_request: {
            days: 7,
            glacier_job_parameters: {
              tier: "Bulk"
            }
          }
        )
      rescue Aws::S3::Errors::InternalError
        puts " internal error...retrying"
        sleep 30
        retry
      rescue Aws::S3::Errors::InvalidObjectState
        puts " already restored #{key}"
      rescue Aws::S3::Errors::NoSuchKey
        # The object was never backed up; re-upload it from local disk if the
        # file is still present.
        puts " missing #{key}"
        file_path = "/var/www/danbooru2/shared/data/#{key}"
        if File.exists?(file_path)
          base64_md5 = Digest::MD5.base64digest(File.read(file_path))
          body = open(file_path, "rb")
          client.put_object(bucket: bucket, key: key, body: body, content_md5: base64_md5, acl: "public-read")
          puts " uploaded"
        end
      rescue Aws::S3::Errors::RestoreAlreadyInProgress
        puts " already restoring #{key}"
      end
    end

    Post.where("id >= ? and id <= ?", min_id, max_id).find_each do |post|
      if post.has_large?
        puts "large:#{post.id}"
        key = "sample/" + File.basename(post.large_file_path)
        f.call(key)
      end

      if post.has_preview?
        puts "preview:#{post.id}"
        key = "preview/" + File.basename(post.preview_file_path)
        f.call(key)
      end

      puts "#{post.id}"
      key = File.basename(post.file_path)
      f.call(key)
    end
  end

  def self.copy_to_standard(min_id = 1_191_247, max_id = 2_000_000)
    credentials = Aws::Credentials.new(Danbooru.config.aws_access_key_id, Danbooru.config.aws_secret_access_key)
    Aws.config.update({
      region: "us-east-1",
      credentials: credentials
    })
    client = Aws::S3::Client.new
    bucket = Danbooru.config.aws_s3_bucket_name

    # Copy a key onto itself with the STANDARD storage class to move it out
    # of Glacier-backed storage.
    f = lambda do |key|
      begin
        client.copy_object(bucket: bucket, key: key, acl: "public-read", storage_class: "STANDARD", copy_source: "/#{bucket}/#{key}", metadata_directive: "COPY")
        puts " copied #{key}"
      rescue Aws::S3::Errors::InternalError
        puts " internal error...retrying"
        sleep 30
        retry
      rescue Aws::S3::Errors::InvalidObjectState
        puts " invalid state #{key}"
      rescue Aws::S3::Errors::NoSuchKey
        puts " missing #{key}"
      end
    end

    Post.where("id >= ? and id <= ?", min_id, max_id).find_each do |post|
      # Posts without a preview are skipped entirely.
      next unless post.has_preview?

      puts "preview:#{post.id}"
      key = "preview/" + File.basename(post.preview_file_path)
      f.call(key)

      if post.has_large?
        puts "large:#{post.id}"
        key = "sample/" + File.basename(post.large_file_path)
        f.call(key)
      end

      puts "#{post.id}"
      key = File.basename(post.file_path)
      f.call(key)
    end
  end

  def self.execute
    return false unless Danbooru.config.aws_s3_enabled?

    last_id = AmazonBackup.last_id
    credentials = Aws::Credentials.new(Danbooru.config.aws_access_key_id, Danbooru.config.aws_secret_access_key)
    Aws.config.update({
      region: "us-east-1",
      credentials: credentials
    })
    logger = Logger.new(STDOUT)
    client = Aws::S3::Client.new(logger: logger)
    bucket = Danbooru.config.aws_s3_bucket_name

    # Back up the next batch of 1000 posts after the last recorded id,
    # uploading the original, preview, and large versions of each file.
    Post.where("id > ?", last_id).limit(1000).order("id").each do |post|
      if File.exists?(post.file_path)
        base64_md5 = Digest::MD5.base64digest(File.read(post.file_path))
        key = File.basename(post.file_path)
        body = open(post.file_path, "rb")
        client.put_object(bucket: bucket, key: key, body: body, content_md5: base64_md5, acl: "public-read")
      end

      if post.has_preview? && File.exists?(post.preview_file_path)
        key = "preview/#{post.md5}.jpg"
        body = open(post.preview_file_path, "rb")
        client.put_object(bucket: bucket, key: key, body: body, acl: "public-read")
      end

      if File.exists?(post.large_file_path)
        key = "sample/#{Danbooru.config.large_image_prefix}#{post.md5}.#{post.large_file_ext}"
        body = open(post.large_file_path, "rb")
        client.put_object(bucket: bucket, key: key, body: body, acl: "public-read")
      end

      AmazonBackup.update_id(post.id)
    end
  rescue Exception
    # Probably a transient network error; give up and retry on the next run.
  end
end