Mirror of https://github.com/fiatjaf/nak.git (synced 2025-12-22 06:28:54 +00:00)

Compare commits: 13 commits
| SHA1 |
|---|
| 8f38468103 |
| 9bf728d850 |
| 8396738fe2 |
| c1d1682d6e |
| 6f00ff4c73 |
| 68bbece3db |
| a83b23d76b |
| a288cc47a4 |
| 5ee7670ba8 |
| b973b476bc |
| 252612b12f |
| 4b8b6bb3de |
| df491be232 |
```diff
@@ -366,6 +366,7 @@ var bunker = &cli.Command{
 			handlerWg.Add(len(relayURLs))
 			for _, relayURL := range relayURLs {
 				go func(relayURL string) {
+					defer handlerWg.Done()
 					if relay, _ := sys.Pool.EnsureRelay(relayURL); relay != nil {
 						err := relay.Publish(ctx, eventResponse)
 						printLock.Lock()
@@ -375,7 +376,6 @@ var bunker = &cli.Command{
 							log("* failed to send response: %s\n", err)
 						}
 						printLock.Unlock()
-						handlerWg.Done()
 					}
 				}(relayURL)
 			}
```
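The two hunks above move the `handlerWg.Done()` call into a `defer` at the top of each goroutine, so the WaitGroup is released even when `sys.Pool.EnsureRelay` fails and the publish branch never runs. Below is a minimal stdlib-only sketch of that pattern; `publishToAll` and `publish` are hypothetical names standing in for nak's actual handler code.

```go
package main

import (
	"fmt"
	"sync"
)

// publishToAll is a hypothetical stand-in for the goroutine fan-out in the
// bunker handler; publish is whatever actually talks to a relay.
func publishToAll(relayURLs []string, publish func(url string) error) {
	var wg sync.WaitGroup
	wg.Add(len(relayURLs))
	for _, url := range relayURLs {
		go func(url string) {
			defer wg.Done() // runs on every path, even when publishing fails early
			if err := publish(url); err != nil {
				return // Done is still guaranteed by the defer
			}
		}(url)
	}
	wg.Wait() // cannot hang: every goroutine decrements the counter exactly once
}

func main() {
	urls := []string{"wss://relay.example/a", "wss://relay.example/b"}
	publishToAll(urls, func(url string) error {
		fmt.Println("pretending to publish to", url)
		return nil
	})
}
```

Deferring `Done` as the first statement of the goroutine is the usual way to keep `wg.Wait()` from hanging on early-return paths.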
dekey.go (new file, 315 lines)

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"slices"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip44"
	"github.com/fatih/color"
	"github.com/urfave/cli/v3"
)

var dekey = &cli.Command{
	Name:                      "dekey",
	Usage:                     "handles NIP-4E decoupled encryption keys",
	Description:               "maybe this picture will explain better than I can do here for now: https://cdn.azzamo.net/89c543d261ad0d665c1dea78f91e527c2e39e7fe503b440265a3c47e63c9139f.png",
	DisableSliceFlagSeparator: true,
	Flags: append(defaultKeyFlags,
		&cli.StringFlag{
			Name:  "device-name",
			Usage: "name of this device that will be published and displayed on other clients",
			Value: func() string {
				if hostname, err := os.Hostname(); err == nil {
					return "nak@" + hostname
				}
				return "nak@unknown"
			}(),
		},
	),
	Action: func(ctx context.Context, c *cli.Command) error {
		log(color.CyanString("gathering keyer from arguments...\n"))
		kr, _, err := gatherKeyerFromArguments(ctx, c)
		if err != nil {
			return err
		}

		log(color.CyanString("getting user public key...\n"))
		userPub, err := kr.GetPublicKey(ctx)
		if err != nil {
			return fmt.Errorf("failed to get user public key: %w", err)
		}

		configPath := c.String("config-path")
		deviceName := c.String("device-name")

		log(color.YellowString("handling device key for %s...\n"), deviceName)
		// check if we already have a local-device secret key
		deviceKeyPath := filepath.Join(configPath, "dekey", "device-key")
		var deviceSec nostr.SecretKey
		if data, err := os.ReadFile(deviceKeyPath); err == nil {
			log(color.GreenString("found existing device key\n"))
			deviceSec, err = nostr.SecretKeyFromHex(string(data))
			if err != nil {
				return fmt.Errorf("invalid device key in %s: %w", deviceKeyPath, err)
			}
		} else {
			log(color.YellowString("generating new device key...\n"))
			// create one
			deviceSec = nostr.Generate()
			os.MkdirAll(filepath.Dir(deviceKeyPath), 0700)
			if err := os.WriteFile(deviceKeyPath, []byte(deviceSec.Hex()), 0600); err != nil {
				return fmt.Errorf("failed to write device key: %w", err)
			}
			log(color.GreenString("device key generated and stored\n"))
		}
		devicePub := deviceSec.Public()

		// get relays for the user
		log(color.CyanString("fetching write relays for user...\n"))
		relays := sys.FetchWriteRelays(ctx, userPub)
		log(color.CyanString("connecting to %d relays...\n"), len(relays))
		relayList := connectToAllRelays(ctx, c, relays, nil, nostr.PoolOptions{})
		if len(relayList) == 0 {
			return fmt.Errorf("no relays to use")
		}
		log(color.GreenString("connected to %d relays\n"), len(relayList))

		// check if kind:4454 is already published
		log(color.CyanString("checking for existing device registration (kind:4454)...\n"))
		events := sys.Pool.FetchMany(ctx, relays, nostr.Filter{
			Kinds:   []nostr.Kind{4454},
			Authors: []nostr.PubKey{userPub},
			Tags: nostr.TagMap{
				"pubkey": []string{devicePub.Hex()},
			},
		}, nostr.SubscriptionOptions{Label: "nak-nip4e"})
		if len(events) == 0 {
			log(color.YellowString("no device registration found, publishing kind:4454...\n"))
			// publish kind:4454
			evt := nostr.Event{
				Kind:      4454,
				Content:   "",
				CreatedAt: nostr.Now(),
				Tags: nostr.Tags{
					{"client", deviceName},
					{"pubkey", devicePub.Hex()},
				},
			}

			// sign with main key
			if err := kr.SignEvent(ctx, &evt); err != nil {
				return fmt.Errorf("failed to sign device event: %w", err)
			}

			// publish
			if err := publishFlow(ctx, c, kr, evt, relayList); err != nil {
				return err
			}
			log(color.GreenString("device registration published\n"))
		} else {
			log(color.GreenString("device already registered\n"))
		}

		// check for kind:10044
		log(color.CyanString("checking for user encryption key (kind:10044)...\n"))
		userKeyEventDate := nostr.Now()
		userKeyResult := sys.Pool.FetchManyReplaceable(ctx, relays, nostr.Filter{
			Kinds:   []nostr.Kind{10044},
			Authors: []nostr.PubKey{userPub},
		}, nostr.SubscriptionOptions{Label: "nak-nip4e"})
		var eSec nostr.SecretKey
		var ePub nostr.PubKey
		if userKeyEvent, ok := userKeyResult.Load(nostr.ReplaceableKey{PubKey: userPub, D: ""}); !ok {
			log(color.YellowString("no user encryption key found, generating new one...\n"))
			// generate main secret key
			eSec = nostr.Generate()
			ePub := eSec.Public()

			// store it
			eKeyPath := filepath.Join(configPath, "dekey", "e", ePub.Hex())
			os.MkdirAll(filepath.Dir(eKeyPath), 0700)
			if err := os.WriteFile(eKeyPath, []byte(eSec.Hex()), 0600); err != nil {
				return fmt.Errorf("failed to write user encryption key: %w", err)
			}
			log(color.GreenString("user encryption key generated and stored\n"))

			// publish kind:10044
			log(color.YellowString("publishing user encryption key (kind:10044)...\n"))
			evt10044 := nostr.Event{
				Kind:      10044,
				Content:   "",
				CreatedAt: userKeyEventDate,
				Tags: nostr.Tags{
					{"n", ePub.Hex()},
				},
			}
			if err := kr.SignEvent(ctx, &evt10044); err != nil {
				return fmt.Errorf("failed to sign kind:10044: %w", err)
			}

			if err := publishFlow(ctx, c, kr, evt10044, relayList); err != nil {
				return err
			}
			log(color.GreenString("user encryption key published\n"))
		} else {
			log(color.GreenString("found existing user encryption key\n"))
			userKeyEventDate = userKeyEvent.CreatedAt

			// get the pub from the tag
			for _, tag := range userKeyEvent.Tags {
				if len(tag) >= 2 && tag[0] == "n" {
					ePub, _ = nostr.PubKeyFromHex(tag[1])
					break
				}
			}
			if ePub == nostr.ZeroPK {
				return fmt.Errorf("invalid kind:10044 event, no 'n' tag")
			}

			// check if we have the key
			eKeyPath := filepath.Join(configPath, "dekey", "e", ePub.Hex())
			if data, err := os.ReadFile(eKeyPath); err == nil {
				log(color.GreenString("found stored user encryption key\n"))
				eSec, err = nostr.SecretKeyFromHex(string(data))
				if err != nil {
					return fmt.Errorf("invalid main key: %w", err)
				}
				if eSec.Public() != ePub {
					return fmt.Errorf("stored user encryption key is corrupted: %w", err)
				}
			} else {
				log(color.YellowString("user encryption key not stored locally, attempting to decrypt from other devices...\n"))
				// try to decrypt from kind:4455
				for eKeyMsg := range sys.Pool.FetchMany(ctx, relays, nostr.Filter{
					Kinds: []nostr.Kind{4455},
					Tags: nostr.TagMap{
						"p": []string{devicePub.Hex()},
					},
				}, nostr.SubscriptionOptions{Label: "nak-nip4e"}) {
					var senderPub nostr.PubKey
					for _, tag := range eKeyMsg.Tags {
						if len(tag) >= 2 && tag[0] == "P" {
							senderPub, _ = nostr.PubKeyFromHex(tag[1])
							break
						}
					}
					if senderPub == nostr.ZeroPK {
						continue
					}
					ss, err := nip44.GenerateConversationKey(senderPub, deviceSec)
					if err != nil {
						continue
					}
					eSecHex, err := nip44.Decrypt(eKeyMsg.Content, ss)
					if err != nil {
						continue
					}
					eSec, err = nostr.SecretKeyFromHex(eSecHex)
					if err != nil {
						continue
					}
					// check if it matches mainPub
					if eSec.Public() == ePub {
						log(color.GreenString("successfully decrypted user encryption key from another device\n"))
						// store it
						os.MkdirAll(filepath.Dir(eKeyPath), 0700)
						os.WriteFile(eKeyPath, []byte(eSecHex), 0600)
						break
					}
				}
			}
		}

		if eSec == [32]byte{} {
			log(color.RedString("main secret key not available, must authorize on another device\n"))
			return nil
		}
		log(color.GreenString("user encryption key ready\n"))

		// now we have mainSec, check for other kind:4454 events newer than the 10044
		log(color.CyanString("checking for other devices and key messages...\n"))
		keyMsgs := make([]string, 0, 5)
		for keyOrDeviceEvt := range sys.Pool.FetchMany(ctx, relays, nostr.Filter{
			Kinds:   []nostr.Kind{4454, 4455},
			Authors: []nostr.PubKey{userPub},
			Since:   userKeyEventDate,
		}, nostr.SubscriptionOptions{Label: "nak-nip4e"}) {
			if keyOrDeviceEvt.Kind == 4455 {
				// key event
				log(color.BlueString("received key message (kind:4455)\n"))

				// skip ourselves
				if keyOrDeviceEvt.Tags.FindWithValue("p", devicePub.Hex()) != nil {
					continue
				}

				// assume a key msg will always come before its associated devicemsg
				// so just store them here:
				pubkeyTag := keyOrDeviceEvt.Tags.Find("p")
				if pubkeyTag == nil {
					continue
				}
				keyMsgs = append(keyMsgs, pubkeyTag[1])
			} else if keyOrDeviceEvt.Kind == 4454 {
				// device event
				log(color.BlueString("received device registration (kind:4454)\n"))

				// skip ourselves
				if keyOrDeviceEvt.Tags.FindWithValue("pubkey", devicePub.Hex()) != nil {
					continue
				}

				// if this already has a corresponding keyMsg then skip it
				pubkeyTag := keyOrDeviceEvt.Tags.Find("pubkey")
				if pubkeyTag == nil {
					continue
				}
				if slices.Contains(keyMsgs, pubkeyTag[1]) {
					continue
				}

				// here we know we're dealing with a deviceMsg without a corresponding keyMsg
				// so we have to build a keyMsg for them
				log(color.YellowString("sending encryption key to new device...\n"))
				theirDevice, err := nostr.PubKeyFromHex(pubkeyTag[1])
				if err != nil {
					continue
				}

				ss, err := nip44.GenerateConversationKey(theirDevice, deviceSec)
				if err != nil {
					continue
				}
				ciphertext, err := nip44.Encrypt(eSec.Hex(), ss)
				if err != nil {
					continue
				}

				evt4455 := nostr.Event{
					Kind:      4455,
					Content:   ciphertext,
					CreatedAt: nostr.Now(),
					Tags: nostr.Tags{
						{"p", theirDevice.Hex()},
						{"P", devicePub.Hex()},
					},
				}
				if err := kr.SignEvent(ctx, &evt4455); err != nil {
					continue
				}

				if err := publishFlow(ctx, c, kr, evt4455, relayList); err != nil {
					log(color.RedString("failed to publish key message: %v\n"), err)
				} else {
					log(color.GreenString("encryption key sent to device\n"))
				}
			}
		}

		return nil
	},
}
```
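The core of `dekey` is the NIP-44 hand-off of the user encryption key between device keys: a device that already holds the key encrypts its hex form to another device's pubkey (kind:4455 with `p`/`P` tags), and the receiver derives the same conversation key to decrypt it. The sketch below replays that round trip using only the calls that appear in the file above (`nostr.Generate`, `nip44.GenerateConversationKey`, `nip44.Encrypt`/`Decrypt`, `nostr.SecretKeyFromHex`); it is an illustration of the exchange, not a reference NIP-4E implementation, and errors are ignored for brevity.

```go
package main

import (
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip44"
)

func main() {
	eSec := nostr.Generate()         // the shared user encryption key
	senderDevice := nostr.Generate() // device that already holds eSec
	newDevice := nostr.Generate()    // device that just published kind:4454

	// the sender encrypts eSec to the new device's pubkey (errors ignored for brevity)
	ss, _ := nip44.GenerateConversationKey(newDevice.Public(), senderDevice)
	ciphertext, _ := nip44.Encrypt(eSec.Hex(), ss)

	// the new device derives the same conversation key and decrypts
	ss2, _ := nip44.GenerateConversationKey(senderDevice.Public(), newDevice)
	plaintext, _ := nip44.Decrypt(ciphertext, ss2)

	recovered, _ := nostr.SecretKeyFromHex(plaintext)
	fmt.Println(recovered.Public() == eSec.Public()) // true if the hand-off worked
}
```

Because the NIP-44 conversation key comes from an ECDH shared secret, deriving it from (newDevice pub, sender sec) or (sender pub, newDevice sec) yields the same key, which is exactly what the decrypt path in dekey.go relies on.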
```diff
@@ -9,7 +9,7 @@ import (
 	"github.com/urfave/cli/v3"
 )
 
-var filter = &cli.Command{
+var filterCmd = &cli.Command{
 	Name:  "filter",
 	Usage: "applies an event filter to an event to see if it matches.",
 	Description: `
```
git.go (56 changed lines)

```diff
@@ -455,11 +455,17 @@ aside from those, there is also:
 		{
 			Name:  "push",
 			Usage: "push git changes",
-			Flags: append(defaultKeyFlags, &cli.BoolFlag{
-				Name:    "force",
-				Aliases: []string{"f"},
-				Usage:   "force push to git remotes",
-			}),
+			Flags: append(defaultKeyFlags,
+				&cli.BoolFlag{
+					Name:    "force",
+					Aliases: []string{"f"},
+					Usage:   "force push to git remotes",
+				},
+				&cli.BoolFlag{
+					Name:  "tags",
+					Usage: "push all refs under refs/tags",
+				},
+			),
 			Action: func(ctx context.Context, c *cli.Command) error {
 				// setup signer
 				kr, _, err := gatherKeyerFromArguments(ctx, c)
@@ -526,6 +532,37 @@ aside from those, there is also:
 					log("- setting HEAD to branch %s\n", color.CyanString(remoteBranch))
 				}
 
+				// add all refs/tags
+				output, err := exec.Command("git", "show-ref", "--tags").Output()
+				if err != nil {
+					return fmt.Errorf("failed to get local tags: %s", err)
+				} else {
+					lines := strings.Split(strings.TrimSpace(string(output)), "\n")
+					for _, line := range lines {
+						line = strings.TrimSpace(line)
+						if line == "" {
+							continue
+						}
+						parts := strings.Fields(line)
+						if len(parts) != 2 {
+							continue
+						}
+						commitHash := parts[0]
+						ref := parts[1]
+
+						tagName := strings.TrimPrefix(ref, "refs/tags/")
+
+						if !c.Bool("force") {
+							// if --force is not passed then we can't overwrite tags
+							if existingHash, exists := state.Tags[tagName]; exists && existingHash != commitHash {
+								return fmt.Errorf("tag %s that is already published pointing to %s, call with --force to overwrite", tagName, existingHash)
+							}
+						}
+						state.Tags[tagName] = commitHash
+						log("- setting tag %s to commit %s\n", color.CyanString(tagName), color.CyanString(commitHash))
+					}
+				}
+
 				// create and sign the new state event
 				newStateEvent := state.ToEvent()
 				err = kr.SignEvent(ctx, &newStateEvent)
@@ -553,6 +590,9 @@ aside from those, there is also:
 				if c.Bool("force") {
 					pushArgs = append(pushArgs, "--force")
 				}
+				if c.Bool("tags") {
+					pushArgs = append(pushArgs, "--tags")
+				}
 				pushCmd := exec.Command("git", pushArgs...)
 				pushCmd.Stderr = os.Stderr
 				pushCmd.Stdout = os.Stdout
@@ -1061,7 +1101,7 @@ func gitUpdateRefs(ctx context.Context, dir string, state nip34.RepositoryState)
 	lines := strings.Split(string(output), "\n")
 	for _, line := range lines {
 		parts := strings.Fields(line)
-		if len(parts) >= 2 && strings.Contains(parts[1], "refs/remotes/nip34/state/") {
+		if len(parts) >= 2 && strings.Contains(parts[1], "refs/heads/nip34/state/") {
 			delCmd := exec.Command("git", "update-ref", "-d", parts[1])
 			if dir != "" {
 				delCmd.Dir = dir
@@ -1078,7 +1118,7 @@ func gitUpdateRefs(ctx context.Context, dir string, state nip34.RepositoryState)
 			branchName = "refs/heads/" + branchName
 		}
 
-		refName := "refs/remotes/nip34/state/" + strings.TrimPrefix(branchName, "refs/heads/")
+		refName := "refs/heads/nip34/state/" + strings.TrimPrefix(branchName, "refs/heads/")
 		updateCmd := exec.Command("git", "update-ref", refName, commit)
 		if dir != "" {
 			updateCmd.Dir = dir
@@ -1091,7 +1131,7 @@ func gitUpdateRefs(ctx context.Context, dir string, state nip34.RepositoryState)
 	// create ref for HEAD
 	if state.HEAD != "" {
 		if headCommit, ok := state.Branches[state.HEAD]; ok {
-			headRefName := "refs/remotes/nip34/state/HEAD"
+			headRefName := "refs/heads/nip34/state/HEAD"
 			updateCmd := exec.Command("git", "update-ref", headRefName, headCommit)
 			if dir != "" {
 				updateCmd.Dir = dir
```
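The new `push --tags` block shells out to `git show-ref --tags`, whose output is one `<commit-hash> <ref>` pair per line, and copies each tag into `state.Tags` unless that would silently move an already-published tag. Here is a standalone sketch of just that parsing step, assuming `git` is on PATH and the working directory is a repository; the `localTags` helper is made up for the example.

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// localTags maps tag names to commit hashes by parsing `git show-ref --tags`,
// the same way the new push --tags block in git.go does.
func localTags() (map[string]string, error) {
	out, err := exec.Command("git", "show-ref", "--tags").Output()
	if err != nil {
		return nil, fmt.Errorf("failed to get local tags: %w", err)
	}
	tags := make(map[string]string)
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		parts := strings.Fields(line) // "<commit-hash> refs/tags/<name>"
		if len(parts) != 2 {
			continue
		}
		tags[strings.TrimPrefix(parts[1], "refs/tags/")] = parts[0]
	}
	return tags, nil
}

func main() {
	tags, err := localTags()
	if err != nil {
		fmt.Println(err)
		return
	}
	for name, commit := range tags {
		fmt.Printf("%s -> %s\n", name, commit)
	}
}
```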
go.mod (4 changed lines)

```diff
@@ -4,7 +4,7 @@ go 1.25
 
 require (
 	fiatjaf.com/lib v0.3.1
-	fiatjaf.com/nostr v0.0.0-20251201232830-91548fa0a157
+	fiatjaf.com/nostr v0.0.0-20251204122254-07061404918d
 	github.com/AlecAivazis/survey/v2 v2.3.7
 	github.com/bep/debounce v1.2.1
 	github.com/btcsuite/btcd/btcec/v2 v2.3.6
@@ -104,3 +104,5 @@ require (
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	rsc.io/qr v0.2.0 // indirect
 )
+
+replace fiatjaf.com/nostr => ../nostrlib
```
go.sum (2 changed lines)

```diff
@@ -2,6 +2,8 @@ fiatjaf.com/lib v0.3.1 h1:/oFQwNtFRfV+ukmOCxfBEAuayoLwXp4wu2/fz5iHpwA=
 fiatjaf.com/lib v0.3.1/go.mod h1:Ycqq3+mJ9jAWu7XjbQI1cVr+OFgnHn79dQR5oTII47g=
 fiatjaf.com/nostr v0.0.0-20251201232830-91548fa0a157 h1:14yLsO2HwpS2CLIKFvLMDp8tVEDahwdC8OeG6NGaL+M=
 fiatjaf.com/nostr v0.0.0-20251201232830-91548fa0a157/go.mod h1:ue7yw0zHfZj23Ml2kVSdBx0ENEaZiuvGxs/8VEN93FU=
+fiatjaf.com/nostr v0.0.0-20251204122254-07061404918d h1:xROmiuT7LrZk+/iGGeTqRI4liqJZrc87AWjsyHtbqDg=
+fiatjaf.com/nostr v0.0.0-20251204122254-07061404918d/go.mod h1:ue7yw0zHfZj23Ml2kVSdBx0ENEaZiuvGxs/8VEN93FU=
 github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
 github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
 github.com/FastFilter/xorfilter v0.2.1 h1:lbdeLG9BdpquK64ZsleBS8B4xO/QW1IM0gMzF7KaBKc=
```
main.go (4 changed lines)

```diff
@@ -28,7 +28,7 @@ var app = &cli.Command{
 	Commands: []*cli.Command{
 		event,
 		req,
-		filter,
+		filterCmd,
 		fetch,
 		count,
 		decode,
@@ -40,6 +40,7 @@ var app = &cli.Command{
 		bunker,
 		serve,
 		blossomCmd,
+		dekey,
 		encrypt,
 		decrypt,
 		gift,
@@ -52,6 +53,7 @@ var app = &cli.Command{
 		git,
 		nip,
 		syncCmd,
+		spell,
 	},
 	Version: version,
 	Flags: []cli.Flag{
```
req.go (207 changed lines)

```diff
@@ -9,6 +9,7 @@ import (
 	"slices"
 	"strings"
 	"sync"
+	"time"
 
 	"fiatjaf.com/nostr"
 	"fiatjaf.com/nostr/eventstore"
@@ -77,11 +78,6 @@ example:
 			Name:  "paginate-interval",
 			Usage: "time between queries when using --paginate",
 		},
-		&cli.UintFlag{
-			Name:        "paginate-global-limit",
-			Usage:       "global limit at which --paginate should stop",
-			DefaultText: "uses the value given by --limit/-l or infinite",
-		},
 		&cli.BoolFlag{
 			Name:  "bare",
 			Usage: "when printing the filter, print just the filter, not enveloped in a [\"REQ\", ...] array",
@@ -226,89 +222,7 @@ example:
 					}
 				}
 			} else {
-				var results chan nostr.RelayEvent
-				opts := nostr.SubscriptionOptions{
-					Label: "nak-req",
-				}
-
-				if c.Bool("paginate") {
-					paginator := sys.Pool.PaginatorWithInterval(c.Duration("paginate-interval"))
-					results = paginator(ctx, relayUrls, filter, opts)
-				} else if c.Bool("outbox") {
-					defs := make([]nostr.DirectedFilter, 0, len(filter.Authors)*2)
-
-					// hardcoded relays, if any
-					for _, relayUrl := range relayUrls {
-						defs = append(defs, nostr.DirectedFilter{
-							Filter: filter,
-							Relay:  relayUrl,
-						})
-					}
-
-					// relays for each pubkey
-					errg := errgroup.Group{}
-					errg.SetLimit(16)
-					mu := sync.Mutex{}
-					for _, pubkey := range filter.Authors {
-						errg.Go(func() error {
-							n := int(c.Uint("outbox-relays-per-pubkey"))
-							for _, url := range sys.FetchOutboxRelays(ctx, pubkey, n) {
-								if slices.Contains(relayUrls, url) {
-									// already hardcoded, ignore
-									continue
-								}
-								if !nostr.IsValidRelayURL(url) {
-									continue
-								}
-
-								matchUrl := func(def nostr.DirectedFilter) bool { return def.Relay == url }
-								idx := slices.IndexFunc(defs, matchUrl)
-								if idx == -1 {
-									// new relay, add it
-									mu.Lock()
-									// check again after locking to prevent races
-									idx = slices.IndexFunc(defs, matchUrl)
-									if idx == -1 {
-										// then add it
-										filter := filter.Clone()
-										filter.Authors = []nostr.PubKey{pubkey}
-										defs = append(defs, nostr.DirectedFilter{
-											Filter: filter,
-											Relay:  url,
-										})
-										mu.Unlock()
-										continue // done with this relay url
-									}
-
-									// otherwise we'll just use the idx
-									mu.Unlock()
-								}
-
-								// existing relay, add this pubkey
-								defs[idx].Authors = append(defs[idx].Authors, pubkey)
-							}
-
-							return nil
-						})
-					}
-					errg.Wait()
-
-					if c.Bool("stream") {
-						results = sys.Pool.BatchedSubscribeMany(ctx, defs, opts)
-					} else {
-						results = sys.Pool.BatchedQueryMany(ctx, defs, opts)
-					}
-				} else {
-					if c.Bool("stream") {
-						results = sys.Pool.SubscribeMany(ctx, relayUrls, filter, opts)
-					} else {
-						results = sys.Pool.FetchMany(ctx, relayUrls, filter, opts)
-					}
-				}
-
-				for ie := range results {
-					stdout(ie.Event)
-				}
+				performReq(ctx, filter, relayUrls, c.Bool("stream"), c.Bool("outbox"), c.Uint("outbox-relays-per-pubkey"), c.Bool("paginate"), c.Duration("paginate-interval"), "nak-req")
 			}
 		} else {
 			// no relays given, will just print the filter
@@ -329,6 +243,123 @@ example:
 	},
 }
 
+func performReq(
+	ctx context.Context,
+	filter nostr.Filter,
+	relayUrls []string,
+	stream bool,
+	outbox bool,
+	outboxRelaysPerPubKey uint64,
+	paginate bool,
+	paginateInterval time.Duration,
+	label string,
+) {
+	var results chan nostr.RelayEvent
+	var closeds chan nostr.RelayClosed
+
+	opts := nostr.SubscriptionOptions{
+		Label: label,
+	}
+
+	if paginate {
+		paginator := sys.Pool.PaginatorWithInterval(paginateInterval)
+		results = paginator(ctx, relayUrls, filter, opts)
+	} else if outbox {
+		defs := make([]nostr.DirectedFilter, 0, len(filter.Authors)*2)
+
+		for _, relayUrl := range relayUrls {
+			defs = append(defs, nostr.DirectedFilter{
+				Filter: filter,
+				Relay:  relayUrl,
+			})
+		}
+
+		// relays for each pubkey
+		errg := errgroup.Group{}
+		errg.SetLimit(16)
+		mu := sync.Mutex{}
+		logverbose("gathering outbox relays for %d authors...\n", len(filter.Authors))
+		for _, pubkey := range filter.Authors {
+			errg.Go(func() error {
+				n := int(outboxRelaysPerPubKey)
+				for _, url := range sys.FetchOutboxRelays(ctx, pubkey, n) {
+					if slices.Contains(relayUrls, url) {
+						// already specified globally, ignore
+						continue
+					}
+					if !nostr.IsValidRelayURL(url) {
+						continue
+					}
+
+					matchUrl := func(def nostr.DirectedFilter) bool { return def.Relay == url }
+					idx := slices.IndexFunc(defs, matchUrl)
+					if idx == -1 {
+						// new relay, add it
+						mu.Lock()
+						// check again after locking to prevent races
+						idx = slices.IndexFunc(defs, matchUrl)
+						if idx == -1 {
+							// then add it
+							filter := filter.Clone()
+							filter.Authors = []nostr.PubKey{pubkey}
+							defs = append(defs, nostr.DirectedFilter{
+								Filter: filter,
+								Relay:  url,
+							})
+							mu.Unlock()
+							continue // done with this relay url
+						}
+
+						// otherwise we'll just use the idx
+						mu.Unlock()
+					}
+
+					// existing relay, add this pubkey
+					defs[idx].Authors = append(defs[idx].Authors, pubkey)
+				}
+
+				return nil
+			})
+		}
+		errg.Wait()
+
+		if stream {
+			logverbose("running subscription with %d directed filters...\n", len(defs))
+			results, closeds = sys.Pool.BatchedSubscribeManyNotifyClosed(ctx, defs, opts)
+		} else {
+			logverbose("running query with %d directed filters...\n", len(defs))
+			results, closeds = sys.Pool.BatchedQueryManyNotifyClosed(ctx, defs, opts)
+		}
+	} else {
+		if stream {
+			logverbose("running subscription to %d relays...\n", len(relayUrls))
+			results, closeds = sys.Pool.SubscribeManyNotifyClosed(ctx, relayUrls, filter, opts)
+		} else {
+			logverbose("running query to %d relays...\n", len(relayUrls))
+			results, closeds = sys.Pool.FetchManyNotifyClosed(ctx, relayUrls, filter, opts)
+		}
+	}
+
+readevents:
+	for {
+		select {
+		case ie, ok := <-results:
+			if !ok {
+				break readevents
+			}
+			stdout(ie.Event)
+		case closed := <-closeds:
+			if closed.HandledAuth {
+				logverbose("%s CLOSED: %s\n", closed.Relay.URL, closed.Reason)
+			} else {
+				log("%s CLOSED: %s\n", closed.Relay.URL, closed.Reason)
+			}
+		case <-ctx.Done():
+			break readevents
+		}
+	}
+}
+
 var reqFilterFlags = []cli.Flag{
 	&PubKeySliceFlag{
 		Name: "author",
```
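Besides extracting the old inline logic into `performReq` (which the new spell command reuses at the end of its Action), the refactor switches to the `...NotifyClosed` pool variants and drains two channels at once: events from `results` and CLOSED notices from `closeds`, stopping when `results` closes or the context is cancelled. A stripped-down sketch of that labeled-break select loop with plain channels; the string payloads here are just placeholders, not the pool's real types.

```go
package main

import "fmt"

// drain mimics the readevents loop in performReq: keep reading until the
// results channel is closed or the done channel fires, printing CLOSED
// notices as they arrive on the side channel.
func drain(results <-chan string, closeds <-chan string, done <-chan struct{}) {
readevents:
	for {
		select {
		case r, ok := <-results:
			if !ok {
				break readevents // results channel closed: stop reading
			}
			fmt.Println("event:", r)
		case c := <-closeds:
			fmt.Println("closed:", c)
		case <-done:
			break readevents // stands in for ctx.Done()
		}
	}
}

func main() {
	results := make(chan string, 2)
	closeds := make(chan string, 1)
	done := make(chan struct{})
	results <- "a"
	results <- "b"
	close(results)
	drain(results, closeds, done)
}
```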
serve.go (17 changed lines)

```diff
@@ -51,6 +51,12 @@ var serve = &cli.Command{
 			Name:  "grasp",
 			Usage: "enable grasp server",
 		},
+		&cli.StringFlag{
+			Name:      "grasp-path",
+			Usage:     "where to store the repositories",
+			TakesFile: true,
+			Hidden:    true,
+		},
 		&cli.BoolFlag{
 			Name:  "blossom",
 			Usage: "enable blossom server",
@@ -135,10 +141,13 @@ var serve = &cli.Command{
 		}
 
 		if c.Bool("grasp") {
-			var err error
-			repoDir, err = os.MkdirTemp("", "nak-serve-grasp-repos-")
-			if err != nil {
-				return fmt.Errorf("failed to create grasp repos directory: %w", err)
+			repoDir = c.String("grasp-path")
+			if repoDir == "" {
+				var err error
+				repoDir, err = os.MkdirTemp("", "nak-serve-grasp-repos-")
+				if err != nil {
+					return fmt.Errorf("failed to create grasp repos directory: %w", err)
+				}
 			}
 			g := grasp.New(rl, repoDir)
 			g.OnRead = func(ctx context.Context, pubkey nostr.PubKey, repo string) (reject bool, reason string) {
```
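The grasp change is a simple fallback: use the directory given via `--grasp-path` when present, otherwise create a temporary one as before. A small standalone sketch of the same decision; the `GRASP_PATH` environment variable is only a stand-in for however the path would actually be supplied.

```go
package main

import (
	"fmt"
	"os"
)

// graspRepoDir returns the configured path if non-empty, otherwise a fresh
// temp dir, mirroring the fallback added to the serve command.
func graspRepoDir(configured string) (string, error) {
	if configured != "" {
		return configured, nil
	}
	dir, err := os.MkdirTemp("", "nak-serve-grasp-repos-")
	if err != nil {
		return "", fmt.Errorf("failed to create grasp repos directory: %w", err)
	}
	return dir, nil
}

func main() {
	dir, err := graspRepoDir(os.Getenv("GRASP_PATH")) // hypothetical way to pass a path
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("serving repositories from", dir)
}
```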
spell.go (new file, 398 lines)

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip19"
	"fiatjaf.com/nostr/sdk/hints"
	"github.com/fatih/color"
	"github.com/markusmobius/go-dateparser"
	"github.com/urfave/cli/v3"
)

var spell = &cli.Command{
	Name:        "spell",
	Usage:       "downloads a spell event and executes its REQ request",
	ArgsUsage:   "[nevent_code]",
	Description: `fetches a spell event (kind 777) and executes REQ command encoded in its tags.`,
	Flags: append(defaultKeyFlags,
		&cli.UintFlag{
			Name:    "outbox-relays-per-pubkey",
			Aliases: []string{"n"},
			Usage:   "number of outbox relays to use for each pubkey",
			Value:   3,
		},
	),
	Action: func(ctx context.Context, c *cli.Command) error {
		// load history from file
		var history []SpellHistoryEntry
		historyPath, err := getSpellHistoryPath()
		if err == nil {
			file, err := os.Open(historyPath)
			if err == nil {
				defer file.Close()
				scanner := bufio.NewScanner(file)
				for scanner.Scan() {
					var entry SpellHistoryEntry
					if err := json.Unmarshal([]byte(scanner.Text()), &entry); err != nil {
						continue // skip invalid entries
					}
					history = append(history, entry)
				}
			}
		}

		if c.Args().Len() == 0 {
			log("recent spells:\n")
			for i, entry := range history {
				if i >= 10 {
					break
				}

				displayName := entry.Name
				if displayName == "" {
					displayName = entry.Content
					if len(displayName) > 28 {
						displayName = displayName[:27] + "…"
					}
				}
				if displayName != "" {
					displayName = displayName + ": "
				}

				desc := entry.Content
				if len(desc) > 50 {
					desc = desc[0:49] + "…"
				}

				lastUsed := entry.LastUsed.Format("2006-01-02 15:04")
				stdout(fmt.Sprintf(" %s %s%s - %s\n",
					color.BlueString(entry.Identifier),
					displayName,
					color.YellowString(lastUsed),
					desc,
				))
			}
			return nil
		}

		// decode nevent to get the spell event
		var pointer nostr.EventPointer
		identifier := c.Args().First()
		prefix, value, err := nip19.Decode(identifier)
		if err == nil {
			if prefix != "nevent" {
				return fmt.Errorf("expected nevent code, got %s", prefix)
			}
			pointer = value.(nostr.EventPointer)
		} else {
			// search our history
			for _, entry := range history {
				if entry.Identifier == identifier {
					pointer = entry.Pointer
					break
				}
			}
		}

		if pointer.ID == nostr.ZeroID {
			return fmt.Errorf("invalid spell reference")
		}

		// fetch spell
		relays := pointer.Relays
		if pointer.Author != nostr.ZeroPK {
			for _, url := range relays {
				sys.Hints.Save(pointer.Author, nostr.NormalizeURL(url), hints.LastInHint, nostr.Now())
			}
			relays = append(relays, sys.FetchOutboxRelays(ctx, pointer.Author, 3)...)
		}
		spell := sys.Pool.QuerySingle(ctx, relays, nostr.Filter{IDs: []nostr.ID{pointer.ID}},
			nostr.SubscriptionOptions{Label: "nak-spell-f"})
		if spell == nil {
			return fmt.Errorf("spell event not found")
		}
		if spell.Kind != 777 {
			return fmt.Errorf("event is not a spell (expected kind 777, got %d)", spell.Kind)
		}

		// parse spell tags to build REQ filter
		spellFilter, err := buildSpellReq(ctx, c, spell.Tags)
		if err != nil {
			return fmt.Errorf("failed to parse spell tags: %w", err)
		}

		// determine relays to query
		var spellRelays []string
		var outbox bool
		relaysTag := spell.Event.Tags.Find("relays")
		if relaysTag == nil {
			// if this tag doesn't exist assume $outbox
			relaysTag = nostr.Tag{"relays", "$outbox"}
		}
		for i := 1; i < len(relaysTag); i++ {
			switch relaysTag[i] {
			case "$outbox":
				outbox = true
			default:
				relays = append(relays, relaysTag[i])
			}
		}

		stream := !spell.Tags.Has("close-on-eose")

		// fill in the author if we didn't have it
		pointer.Author = spell.PubKey

		// add to history before execution
		{
			idStr := nip19.EncodeNevent(spell.ID, nil, nostr.ZeroPK)
			identifier = "spell" + idStr[len(idStr)-7:]
			nameTag := spell.Tags.Find("name")
			var name string
			if nameTag != nil {
				name = nameTag[1]
			}
			if len(history) > 100 {
				history = history[:100]
			}
			// write back to file
			file, err := os.Create(historyPath)
			if err != nil {
				return err
			}
			data, _ := json.Marshal(SpellHistoryEntry{
				Identifier: identifier,
				Name:       name,
				Content:    spell.Content,
				LastUsed:   time.Now(),
				Pointer:    pointer,
			})
			file.Write(data)
			file.Write([]byte{'\n'})
			for i, entry := range history {
				// limit history size (keep last 100)
				if i == 100 {
					break
				}

				data, _ := json.Marshal(entry)
				file.Write(data)
				file.Write([]byte{'\n'})
			}
			file.Close()

			logverbose("executing %s: %s relays=%v outbox=%v stream=%v\n",
				identifier, spellFilter, spellRelays, outbox, stream)
		}

		// execute
		performReq(ctx, spellFilter, spellRelays, stream, outbox, c.Uint("outbox-relays-per-pubkey"), false, 0, "nak-spell")

		return nil
	},
}

func buildSpellReq(ctx context.Context, c *cli.Command, tags nostr.Tags) (nostr.Filter, error) {
	filter := nostr.Filter{}

	getMe := func() (nostr.PubKey, error) {
		kr, _, err := gatherKeyerFromArguments(ctx, c)
		if err != nil {
			return nostr.ZeroPK, fmt.Errorf("failed to get keyer: %w", err)
		}

		pubkey, err := kr.GetPublicKey(ctx)
		if err != nil {
			return nostr.ZeroPK, fmt.Errorf("failed to get public key from keyer: %w", err)
		}

		return pubkey, nil
	}

	for _, tag := range tags {
		if len(tag) == 0 {
			continue
		}

		switch tag[0] {
		case "cmd":
			if len(tag) < 2 || tag[1] != "REQ" {
				return nostr.Filter{}, fmt.Errorf("only REQ commands are supported")
			}

		case "k":
			for i := 1; i < len(tag); i++ {
				if kind, err := strconv.Atoi(tag[i]); err == nil {
					filter.Kinds = append(filter.Kinds, nostr.Kind(kind))
				}
			}

		case "authors":
			for i := 1; i < len(tag); i++ {
				switch tag[i] {
				case "$me":
					me, err := getMe()
					if err != nil {
						return nostr.Filter{}, err
					}
					filter.Authors = append(filter.Authors, me)
				case "$contacts":
					me, err := getMe()
					if err != nil {
						return nostr.Filter{}, err
					}
					for _, f := range sys.FetchFollowList(ctx, me).Items {
						filter.Authors = append(filter.Authors, f.Pubkey)
					}
				default:
					pubkey, err := nostr.PubKeyFromHex(tag[i])
					if err != nil {
						return nostr.Filter{}, fmt.Errorf("invalid pubkey '%s' in 'authors': %w", tag[i], err)
					}
					filter.Authors = append(filter.Authors, pubkey)
				}
			}

		case "ids":
			for i := 1; i < len(tag); i++ {
				id, err := nostr.IDFromHex(tag[i])
				if err != nil {
					return nostr.Filter{}, fmt.Errorf("invalid id '%s' in 'authors': %w", tag[i], err)
				}
				filter.IDs = append(filter.IDs, id)
			}

		case "tag":
			if len(tag) < 3 {
				continue
			}
			tagName := tag[1]
			if filter.Tags == nil {
				filter.Tags = make(nostr.TagMap)
			}
			for i := 2; i < len(tag); i++ {
				switch tag[i] {
				case "$me":
					me, err := getMe()
					if err != nil {
						return nostr.Filter{}, err
					}
					filter.Tags[tagName] = append(filter.Tags[tagName], me.Hex())
				case "$contacts":
					me, err := getMe()
					if err != nil {
						return nostr.Filter{}, err
					}
					for _, f := range sys.FetchFollowList(ctx, me).Items {
						filter.Tags[tagName] = append(filter.Tags[tagName], f.Pubkey.Hex())
					}
				default:
					filter.Tags[tagName] = append(filter.Tags[tagName], tag[i])
				}
			}

		case "limit":
			if len(tag) >= 2 {
				if limit, err := strconv.Atoi(tag[1]); err == nil {
					filter.Limit = limit
				}
			}

		case "since":
			if len(tag) >= 2 {
				date, err := dateparser.Parse(&dateparser.Configuration{
					DefaultTimezone: time.Local,
					CurrentTime:     time.Now(),
				}, tag[1])
				if err != nil {
					return nostr.Filter{}, fmt.Errorf("invalid date %s: %w", tag[1], err)
				}
				filter.Since = nostr.Timestamp(date.Time.Unix())
			}

		case "until":
			if len(tag) >= 2 {
				date, err := dateparser.Parse(&dateparser.Configuration{
					DefaultTimezone: time.Local,
					CurrentTime:     time.Now(),
				}, tag[1])
				if err != nil {
					return nostr.Filter{}, fmt.Errorf("invalid date %s: %w", tag[1], err)
				}
				filter.Until = nostr.Timestamp(date.Time.Unix())
			}

		case "search":
			if len(tag) >= 2 {
				filter.Search = tag[1]
			}
		}
	}

	return filter, nil
}

func parseRelativeTime(timeStr string) (nostr.Timestamp, error) {
	// Handle special cases
	switch timeStr {
	case "now":
		return nostr.Now(), nil
	}

	// Try to parse as relative time (e.g., "7d", "1h", "30m")
	if strings.HasSuffix(timeStr, "d") {
		days := strings.TrimSuffix(timeStr, "d")
		if daysInt, err := strconv.Atoi(days); err == nil {
			return nostr.Now() - nostr.Timestamp(daysInt*24*60*60), nil
		}
	} else if strings.HasSuffix(timeStr, "h") {
		hours := strings.TrimSuffix(timeStr, "h")
		if hoursInt, err := strconv.Atoi(hours); err == nil {
			return nostr.Now() - nostr.Timestamp(hoursInt*60*60), nil
		}
	} else if strings.HasSuffix(timeStr, "m") {
		minutes := strings.TrimSuffix(timeStr, "m")
		if minutesInt, err := strconv.Atoi(minutes); err == nil {
			return nostr.Now() - nostr.Timestamp(minutesInt*60), nil
		}
	}

	// try to parse as direct timestamp
	if ts, err := strconv.ParseInt(timeStr, 10, 64); err == nil {
		return nostr.Timestamp(ts), nil
	}

	return 0, fmt.Errorf("invalid time format: %s", timeStr)
}

type SpellHistoryEntry struct {
	Identifier string             `json:"_id"`
	Name       string             `json:"name,omitempty"`
	Content    string             `json:"content,omitempty"`
	LastUsed   time.Time          `json:"last_used"`
	Pointer    nostr.EventPointer `json:"pointer"`
}

func getSpellHistoryPath() (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	historyDir := filepath.Join(home, ".config", "nak", "spells")

	// create directory if it doesn't exist
	if err := os.MkdirAll(historyDir, 0755); err != nil {
		return "", err
	}

	return filepath.Join(historyDir, "history"), nil
}
```
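`buildSpellReq` turns the tags of a kind:777 event into a `nostr.Filter`, expanding the `$me` and `$contacts` placeholders with the local keyer and parsing `since`/`until` with go-dateparser. The event below is a hypothetical spell assembled with that same tag vocabulary; the tag values are invented for illustration and are not taken from any published spell.

```go
package main

import (
	"fmt"

	"fiatjaf.com/nostr"
)

func main() {
	// a made-up spell: "notes from my contacts over the last week"
	spell := nostr.Event{
		Kind:      777,
		CreatedAt: nostr.Now(),
		Content:   "last week of notes from my contacts",
		Tags: nostr.Tags{
			{"cmd", "REQ"},                 // only REQ is supported by buildSpellReq
			{"k", "1"},                     // kinds
			{"authors", "$contacts"},       // expands to the follow list of the local key
			{"since", "7 days ago"},        // parsed with dateparser
			{"limit", "50"},
			{"relays", "$outbox"},          // resolve relays per author instead of a fixed list
			{"name", "contacts-last-week"}, // shown in the local history listing
		},
	}
	fmt.Println(spell.Kind, len(spell.Tags))
}
```

With `{"relays", "$outbox"}` the spell command resolves relays per author, and since no `close-on-eose` tag is present the resulting request keeps streaming.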