backup.go

package db

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"sync"

	jsoniter "github.com/json-iterator/go"
	"github.com/pkg/errors"
	"gorm.io/gorm"
	"gorm.io/gorm/schema"
	log "unknwon.dev/clog/v2"
	"xorm.io/core"
	"xorm.io/xorm"

	"gogs.io/gogs/internal/conf"
	"gogs.io/gogs/internal/osutil"
)
// getTableType returns the type name of a table definition without the package name,
// e.g. *db.LFSObject -> LFSObject.
func getTableType(t interface{}) string {
	return strings.TrimPrefix(fmt.Sprintf("%T", t), "*db.")
}
// DumpDatabase dumps all data from the database to the file system in JSON Lines format.
func DumpDatabase(db *gorm.DB, dirPath string, verbose bool) error {
	err := os.MkdirAll(dirPath, os.ModePerm)
	if err != nil {
		return err
	}

	err = dumpLegacyTables(dirPath, verbose)
	if err != nil {
		return errors.Wrap(err, "dump legacy tables")
	}

	for _, table := range Tables {
		tableName := getTableType(table)
		if verbose {
			log.Trace("Dumping table %q...", tableName)
		}

		err := func() error {
			tableFile := filepath.Join(dirPath, tableName+".json")
			f, err := os.Create(tableFile)
			if err != nil {
				return errors.Wrap(err, "create table file")
			}
			defer func() { _ = f.Close() }()

			return dumpTable(db, table, f)
		}()
		if err != nil {
			return errors.Wrapf(err, "dump table %q", tableName)
		}
	}
	return nil
}
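
// dumpTable writes every row of the given table to w, one JSON object per line.
// Rows are ordered by primary key so the output is deterministic; LFS objects
// are ordered by their composite key (repo_id, oid) instead of an id column.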
func dumpTable(db *gorm.DB, table interface{}, w io.Writer) error {
	query := db.Model(table).Order("id ASC")
	switch table.(type) {
	case *LFSObject:
		query = db.Model(table).Order("repo_id, oid ASC")
	}

	rows, err := query.Rows()
	if err != nil {
		return errors.Wrap(err, "select rows")
	}
	defer func() { _ = rows.Close() }()

	for rows.Next() {
		elem := reflect.New(reflect.TypeOf(table).Elem()).Interface()
		err = db.ScanRows(rows, elem)
		if err != nil {
			return errors.Wrap(err, "scan rows")
		}

		err = jsoniter.NewEncoder(w).Encode(elem)
		if err != nil {
			return errors.Wrap(err, "encode JSON")
		}
	}
	return nil
}
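
// dumpLegacyTables dumps tables that are still managed by XORM (plus the
// Version table) to the file system, one "<Type>.json" file per table in
// JSON Lines format.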
func dumpLegacyTables(dirPath string, verbose bool) error {
	// Purposely create a local variable to avoid modifying the global variable
	legacyTables := append(legacyTables, new(Version))
	for _, table := range legacyTables {
		tableName := getTableType(table)
		if verbose {
			log.Trace("Dumping table %q...", tableName)
		}

		tableFile := filepath.Join(dirPath, tableName+".json")
		f, err := os.Create(tableFile)
		if err != nil {
			return fmt.Errorf("create JSON file: %v", err)
		}

		if err = x.Asc("id").Iterate(table, func(idx int, bean interface{}) (err error) {
			return jsoniter.NewEncoder(f).Encode(bean)
		}); err != nil {
			_ = f.Close()
			return fmt.Errorf("dump table '%s': %v", tableName, err)
		}
		_ = f.Close()
	}
	return nil
}
// ImportDatabase imports data from a backup archive in JSON Lines format.
func ImportDatabase(db *gorm.DB, dirPath string, verbose bool) error {
	err := importLegacyTables(dirPath, verbose)
	if err != nil {
		return errors.Wrap(err, "import legacy tables")
	}

	for _, table := range Tables {
		tableName := strings.TrimPrefix(fmt.Sprintf("%T", table), "*db.")
		err := func() error {
			tableFile := filepath.Join(dirPath, tableName+".json")
			if !osutil.IsFile(tableFile) {
				log.Info("Skipped table %q", tableName)
				return nil
			}

			if verbose {
				log.Trace("Importing table %q...", tableName)
			}

			f, err := os.Open(tableFile)
			if err != nil {
				return errors.Wrap(err, "open table file")
			}
			defer func() { _ = f.Close() }()

			return importTable(db, table, f)
		}()
		if err != nil {
			return errors.Wrapf(err, "import table %q", tableName)
		}
	}
	return nil
}
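
// importTable drops and recreates the given table, then inserts one row per
// JSON line read from r. On PostgreSQL it also resets the id sequence, except
// for tables without an auto-increment id column (currently lfs_object).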
func importTable(db *gorm.DB, table interface{}, r io.Reader) error {
	err := db.Migrator().DropTable(table)
	if err != nil {
		return errors.Wrap(err, "drop table")
	}
	err = db.Migrator().AutoMigrate(table)
	if err != nil {
		return errors.Wrap(err, "auto migrate")
	}

	s, err := schema.Parse(table, &sync.Map{}, db.NamingStrategy)
	if err != nil {
		return errors.Wrap(err, "parse schema")
	}
	rawTableName := s.Table
	skipResetIDSeq := map[string]bool{
		"lfs_object": true,
	}

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		elem := reflect.New(reflect.TypeOf(table).Elem()).Interface()
		err = jsoniter.Unmarshal(scanner.Bytes(), elem)
		if err != nil {
			return errors.Wrap(err, "unmarshal JSON to struct")
		}

		err = db.Create(elem).Error
		if err != nil {
			return errors.Wrap(err, "create row")
		}
	}

	// PostgreSQL needs to manually reset the table sequence for auto-increment keys
	if conf.UsePostgreSQL && !skipResetIDSeq[rawTableName] {
		seqName := rawTableName + "_id_seq"
		if _, err = x.Exec(fmt.Sprintf(`SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM "%s"), 1), false);`, seqName, rawTableName)); err != nil {
			return errors.Wrapf(err, "reset table %q.%q", rawTableName, seqName)
		}
	}
	return nil
}
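
// importLegacyTables imports tables that are still managed by XORM (plus the
// Version table) from their JSON Lines files, recreating each table first and
// restoring timestamp fields that the Insert call would otherwise overwrite.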
func importLegacyTables(dirPath string, verbose bool) error {
	snakeMapper := core.SnakeMapper{}

	skipInsertProcessors := map[string]bool{
		"mirror":    true,
		"milestone": true,
	}

	// Purposely create a local variable to avoid modifying the global variable
	legacyTables := append(legacyTables, new(Version))
	for _, table := range legacyTables {
		tableName := strings.TrimPrefix(fmt.Sprintf("%T", table), "*db.")
		tableFile := filepath.Join(dirPath, tableName+".json")
		if !osutil.IsFile(tableFile) {
			continue
		}

		if verbose {
			log.Trace("Importing table %q...", tableName)
		}

		if err := x.DropTables(table); err != nil {
			return fmt.Errorf("drop table %q: %v", tableName, err)
		} else if err = x.Sync2(table); err != nil {
			return fmt.Errorf("sync table %q: %v", tableName, err)
		}

		f, err := os.Open(tableFile)
		if err != nil {
			return fmt.Errorf("open JSON file: %v", err)
		}
		rawTableName := x.TableName(table)
		_, isInsertProcessor := table.(xorm.BeforeInsertProcessor)
		scanner := bufio.NewScanner(f)
		for scanner.Scan() {
			if err = jsoniter.Unmarshal(scanner.Bytes(), table); err != nil {
				return fmt.Errorf("unmarshal to struct: %v", err)
			}

			if _, err = x.Insert(table); err != nil {
				return fmt.Errorf("insert struct: %v", err)
			}

			var meta struct {
				ID             int64
				CreatedUnix    int64
				DeadlineUnix   int64
				ClosedDateUnix int64
			}
			if err = jsoniter.Unmarshal(scanner.Bytes(), &meta); err != nil {
				log.Error("Failed to unmarshal to meta struct: %v", err)
			}

			// Reset created_unix back to the date saved in the archive because the Insert method updates its value
			if isInsertProcessor && !skipInsertProcessors[rawTableName] {
				if _, err = x.Exec("UPDATE `"+rawTableName+"` SET created_unix=? WHERE id=?", meta.CreatedUnix, meta.ID); err != nil {
					log.Error("Failed to reset '%s.created_unix': %v", rawTableName, err)
				}
			}

			switch rawTableName {
			case "milestone":
				if _, err = x.Exec("UPDATE `"+rawTableName+"` SET deadline_unix=?, closed_date_unix=? WHERE id=?", meta.DeadlineUnix, meta.ClosedDateUnix, meta.ID); err != nil {
					log.Error("Failed to reset 'milestone.deadline_unix', 'milestone.closed_date_unix': %v", err)
				}
			}
		}
		// Close the table file before moving on to the next table
		_ = f.Close()

		// PostgreSQL needs to manually reset the table sequence for auto-increment keys
		if conf.UsePostgreSQL {
			rawTableName := snakeMapper.Obj2Table(tableName)
			seqName := rawTableName + "_id_seq"
			if _, err = x.Exec(fmt.Sprintf(`SELECT setval('%s', COALESCE((SELECT MAX(id)+1 FROM "%s"), 1), false);`, seqName, rawTableName)); err != nil {
				return fmt.Errorf("reset table %q sequence: %v", rawTableName, err)
			}
		}
	}
	return nil
}