Initial commit

This commit is contained in:
2025-11-30 13:01:24 -05:00
parent f4596a372d
commit 29355260ed
607 changed files with 136371 additions and 234 deletions

View File

@@ -0,0 +1,134 @@
package memopayload
import (
"context"
"log/slog"
"slices"
"github.com/pkg/errors"
"github.com/usememos/gomark/ast"
"github.com/usememos/gomark/parser"
"github.com/usememos/gomark/parser/tokenizer"
storepb "github.com/usememos/memos/proto/gen/store"
"github.com/usememos/memos/store"
)
// Runner rebuilds memo payloads. It holds the store used to list memos and
// persist the rebuilt payloads.
type Runner struct {
Store *store.Store
}
// NewRunner returns a Runner backed by the given store.
func NewRunner(store *store.Store) *Runner {
	return &Runner{Store: store}
}
// RunOnce rebuilds the payload of all memos.
//
// Memos are walked in fixed-size pages so the whole table is never held in
// memory at once. Per-memo failures are logged and skipped; a listing failure
// aborts the run.
func (r *Runner) RunOnce(ctx context.Context) {
	const batchSize = 100
	var (
		offset    int
		totalDone int
	)
	for {
		limit := batchSize
		batch, err := r.Store.ListMemos(ctx, &store.FindMemo{
			Limit:  &limit,
			Offset: &offset,
		})
		if err != nil {
			slog.Error("failed to list memos", "err", err)
			return
		}
		if len(batch) == 0 {
			// No rows left; the rebuild is complete.
			break
		}
		succeeded := 0
		for _, memo := range batch {
			if err := RebuildMemoPayload(memo); err != nil {
				slog.Error("failed to rebuild memo payload", "err", err, "memoID", memo.ID)
				continue
			}
			err := r.Store.UpdateMemo(ctx, &store.UpdateMemo{
				ID:      memo.ID,
				Payload: memo.Payload,
			})
			if err != nil {
				slog.Error("failed to update memo", "err", err, "memoID", memo.ID)
				continue
			}
			succeeded++
		}
		totalDone += len(batch)
		slog.Info("Processed memo batch", "batchSize", len(batch), "successCount", succeeded, "totalProcessed", totalDone)
		// Advance to the next page.
		// NOTE(review): offset pagination assumes UpdateMemo does not change the
		// listing order of already-visited rows — verify against the store's
		// ordering, otherwise memos could be skipped or revisited.
		offset += len(batch)
	}
}
// RebuildMemoPayload re-derives memo.Payload (tags and content properties)
// from the memo's markdown content. It returns an error only when the content
// fails to parse; otherwise memo.Payload is populated in place.
func RebuildMemoPayload(memo *store.Memo) error {
	nodes, err := parser.Parse(tokenizer.Tokenize(memo.Content))
	if err != nil {
		return errors.Wrap(err, "failed to parse content")
	}
	if memo.Payload == nil {
		memo.Payload = &storepb.MemoPayload{}
	}
	prop := &storepb.MemoPayload_Property{}
	tagList := []string{}
	TraverseASTNodes(nodes, func(node ast.Node) {
		switch typed := node.(type) {
		case *ast.Tag:
			// Record each tag once, preserving first-seen order.
			if !slices.Contains(tagList, typed.Content) {
				tagList = append(tagList, typed.Content)
			}
		case *ast.Link, *ast.AutoLink:
			prop.HasLink = true
		case *ast.TaskListItem:
			prop.HasTaskList = true
			if !typed.Complete {
				prop.HasIncompleteTasks = true
			}
		case *ast.CodeBlock:
			prop.HasCode = true
		case *ast.EmbeddedContent:
			// TODO: validate references.
			prop.References = append(prop.References, typed.ResourceName)
		}
	})
	memo.Payload.Tags = tagList
	memo.Payload.Property = prop
	return nil
}
// TraverseASTNodes walks nodes depth-first, calling fn on each node before
// descending into the children of the container node types listed below.
func TraverseASTNodes(nodes []ast.Node, fn func(ast.Node)) {
	for _, node := range nodes {
		fn(node)
		// Collect the children (if any) of container nodes, then recurse once.
		var children []ast.Node
		switch typed := node.(type) {
		case *ast.Paragraph:
			children = typed.Children
		case *ast.Heading:
			children = typed.Children
		case *ast.Blockquote:
			children = typed.Children
		case *ast.List:
			children = typed.Children
		case *ast.OrderedListItem:
			children = typed.Children
		case *ast.UnorderedListItem:
			children = typed.Children
		case *ast.TaskListItem:
			children = typed.Children
		case *ast.Bold:
			children = typed.Children
		}
		if children != nil {
			TraverseASTNodes(children, fn)
		}
	}
}

View File

@@ -0,0 +1,134 @@
package s3presign
import (
"context"
"log/slog"
"time"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/usememos/memos/plugin/storage/s3"
storepb "github.com/usememos/memos/proto/gen/store"
"github.com/usememos/memos/store"
)
// Runner periodically refreshes presigned URLs for S3-stored attachments.
// It holds the store used to list attachments and persist refreshed URLs.
type Runner struct {
Store *store.Store
}
// NewRunner returns a Runner backed by the given store.
func NewRunner(store *store.Store) *Runner {
	return &Runner{Store: store}
}
// Schedule runner every 12 hours.
const runnerInterval = time.Hour * 12

// Run executes RunOnce every runnerInterval until ctx is cancelled.
// Note: the first run happens one full interval after Run is called.
func (r *Runner) Run(ctx context.Context) {
	tick := time.NewTicker(runnerInterval)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			r.RunOnce(ctx)
		}
	}
}
// RunOnce performs a single presign pass over all S3-stored attachments.
func (r *Runner) RunOnce(ctx context.Context) {
r.CheckAndPresign(ctx)
}
// CheckAndPresign walks all S3-stored attachments in batches and regenerates
// the presigned GET URL of any attachment whose URL has less than 4 days of
// validity remaining (URLs are issued with a 5-day expiration). Per-attachment
// failures are logged and skipped; a listing failure aborts the pass.
func (r *Runner) CheckAndPresign(ctx context.Context) {
	workspaceStorageSetting, err := r.Store.GetWorkspaceStorageSetting(ctx)
	if err != nil {
		// Fix: this error was previously swallowed with a bare return, making
		// configuration failures invisible; log it like every other error path.
		slog.Error("Failed to get workspace storage setting", "error", err)
		return
	}
	// The workspace-level S3 config is loop-invariant; resolve it once up front.
	workspaceS3Config := workspaceStorageSetting.GetS3Config()
	s3StorageType := storepb.AttachmentStorageType_S3
	// Limit attachments to a reasonable batch size
	const batchSize = 100
	offset := 0
	for {
		limit := batchSize
		attachments, err := r.Store.ListAttachments(ctx, &store.FindAttachment{
			GetBlob:     false,
			StorageType: &s3StorageType,
			Limit:       &limit,
			Offset:      &offset,
		})
		if err != nil {
			slog.Error("Failed to list attachments for presigning", "error", err)
			return
		}
		// Break if no more attachments
		if len(attachments) == 0 {
			break
		}
		// Process batch of attachments
		presignCount := 0
		for _, attachment := range attachments {
			s3ObjectPayload := attachment.Payload.GetS3Object()
			if s3ObjectPayload == nil {
				// Not an S3 object payload; nothing to presign.
				continue
			}
			if s3ObjectPayload.LastPresignedTime != nil {
				// Skip if the presigned URL is still valid for the next 4 days.
				// The expiration time is set to 5 days.
				if time.Now().Before(s3ObjectPayload.LastPresignedTime.AsTime().Add(4 * 24 * time.Hour)) {
					continue
				}
			}
			// Prefer the attachment-specific S3 config; fall back to the
			// workspace-wide config resolved above.
			s3Config := workspaceS3Config
			if s3ObjectPayload.S3Config != nil {
				s3Config = s3ObjectPayload.S3Config
			}
			if s3Config == nil {
				slog.Error("S3 config is not found")
				continue
			}
			s3Client, err := s3.NewClient(ctx, s3Config)
			if err != nil {
				slog.Error("Failed to create S3 client", "error", err)
				continue
			}
			presignURL, err := s3Client.PresignGetObject(ctx, s3ObjectPayload.Key)
			if err != nil {
				slog.Error("Failed to presign URL", "error", err, "attachmentID", attachment.ID)
				continue
			}
			// Record the config actually used and the refresh time so the next
			// pass can tell whether this URL needs regenerating.
			s3ObjectPayload.S3Config = s3Config
			s3ObjectPayload.LastPresignedTime = timestamppb.New(time.Now())
			if err := r.Store.UpdateAttachment(ctx, &store.UpdateAttachment{
				ID:        attachment.ID,
				Reference: &presignURL,
				Payload: &storepb.AttachmentPayload{
					Payload: &storepb.AttachmentPayload_S3Object_{
						S3Object: s3ObjectPayload,
					},
				},
			}); err != nil {
				slog.Error("Failed to update attachment", "error", err, "attachmentID", attachment.ID)
				continue
			}
			presignCount++
		}
		slog.Info("Presigned batch of S3 attachments", "batchSize", len(attachments), "presigned", presignCount)
		// Move to next batch
		offset += len(attachments)
	}
}