Compare commits

9 Commits
main ... main

Author SHA1 Message Date
Sytone 476155448e Merge branch 'main' of ssh://kolaente.dev:9022/vikunja/api into main 2021-06-28 11:11:12 -07:00
Sytone 5e53299386 Merge branch 'main' of ssh://kolaente.dev:9022/vikunja/api into main 2021-06-17 12:54:33 -07:00
Sytone ae90aa80aa Merge branch 'main' of ssh://kolaente.dev:9022/vikunja/api into main 2021-06-17 12:54:16 -07:00
Sytone 15f1b15dbf Update to match swagger formatting; Update to match yaml files 2021-06-02 10:36:45 -07:00
Sytone 302064b4bb Updated xorm to bigint; Update model to int64; Made value null by default; Updated tests to not set by default; Updated Swagger documents 2021-06-02 10:28:56 -07:00
Sytone e8135dcf75 Fix swagger issues; Add editor config for consistency 2021-06-02 10:09:45 -07:00
sytone d9c91bc03a Merge branch 'main' of ssh://kolaente.dev:9022/vikunja/api into main 2021-06-01 16:46:43 -07:00
sytone 350f59c479 Fix fmt issue 2021-05-27 12:59:04 -07:00
sytone 51f9d8f690 Add default list setting 2021-05-27 11:27:03 -07:00
1163 changed files with 26718 additions and 158679 deletions

@@ -1,15 +0,0 @@
files/
dist/
logs/
docs/
Dockerfile
docker-manifest.tmpl
docker-manifest-unstable.tmpl
*.db
*.zip
# Frontend
/frontend/node_modules/
/frontend/.direnv
/frontend/dist

1405 .drone.yml
File diff suppressed because it is too large

790 .drone1.yml Normal file
@@ -0,0 +1,790 @@
kind: pipeline
name: testing
workspace:
base: /go
path: src/code.vikunja.io/api
services:
- name: test-mysql-unit
image: mariadb:10
environment:
MYSQL_ROOT_PASSWORD: vikunjatest
MYSQL_DATABASE: vikunjatest
- name: test-mysql-integration
image: mariadb:10
environment:
MYSQL_ROOT_PASSWORD: vikunjatest
MYSQL_DATABASE: vikunjatest
- name: test-mysql-migration
image: mariadb:10
environment:
MYSQL_ROOT_PASSWORD: vikunjatest
MYSQL_DATABASE: vikunjatest
- name: test-postgres-unit
image: postgres:12
environment:
POSTGRES_PASSWORD: vikunjatest
POSTGRES_DB: vikunjatest
- name: test-postgres-integration
image: postgres:12
environment:
POSTGRES_PASSWORD: vikunjatest
POSTGRES_DB: vikunjatest
- name: test-postgres-migration
image: postgres:12
environment:
POSTGRES_PASSWORD: vikunjatest
POSTGRES_DB: vikunjatest
trigger:
branch:
include:
- main
event:
include:
- push
- pull_request
steps:
- name: fetch-tags
image: docker:git
commands:
- git fetch --tags
# We're statically compiling the magefile to avoid race condition issues caused by multiple pipeline steps
# compiling the same magefile at the same time. It's also faster if each step does not need to compile it first.
- name: mage
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
commands:
- mage -compile ./mage-static
- env
when:
event: [ push, tag, pull_request ]
- name: build
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
depends_on: [ mage ]
commands:
- ./mage-static build:build
when:
event: [ push, tag, pull_request ]
- name: lint
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
depends_on: [ build ]
commands:
- wget -O - -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.31.0
- ./mage-static check:all
when:
event: [ push, tag, pull_request ]
- name: test-migration-prepare
image: kolaente/toolbox:latest
pull: true
commands:
# Get the latest version
- wget https://dl.vikunja.io/api/unstable/vikunja-unstable-linux-amd64-full.zip -q -O vikunja-latest.zip
- unzip vikunja-latest.zip vikunja-unstable-linux-amd64
- name: test-migration-sqlite
image: kolaente/toolbox:latest
pull: true
depends_on: [ test-migration-prepare, build ]
environment:
VIKUNJA_DATABASE_TYPE: sqlite
VIKUNJA_DATABASE_PATH: ./vikunja-migration-test.db
VIKUNJA_LOG_DATABASE: stdout
VIKUNJA_LOG_DATABASELEVEL: debug
commands:
- ./vikunja-unstable-linux-amd64 migrate
# Run the migrations from the binary build in the step before
- ./vikunja migrate
when:
event: [ push, tag, pull_request ]
- name: test-migration-mysql
image: kolaente/toolbox:latest
pull: true
depends_on: [ test-migration-prepare, build ]
environment:
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_HOST: test-mysql-migration
VIKUNJA_DATABASE_USER: root
VIKUNJA_DATABASE_PASSWORD: vikunjatest
VIKUNJA_DATABASE_DATABASE: vikunjatest
VIKUNJA_LOG_DATABASE: stdout
VIKUNJA_LOG_DATABASELEVEL: debug
commands:
- ./vikunja-unstable-linux-amd64 migrate
# Run the migrations from the binary build in the step before
- ./vikunja migrate
when:
event: [ push, tag, pull_request ]
- name: test-migration-psql
image: kolaente/toolbox:latest
pull: true
depends_on: [ test-migration-prepare, build ]
environment:
VIKUNJA_DATABASE_TYPE: postgres
VIKUNJA_DATABASE_HOST: test-postgres-migration
VIKUNJA_DATABASE_USER: postgres
VIKUNJA_DATABASE_PASSWORD: vikunjatest
VIKUNJA_DATABASE_DATABASE: vikunjatest
VIKUNJA_DATABASE_SSLMODE: disable
VIKUNJA_LOG_DATABASE: stdout
VIKUNJA_LOG_DATABASELEVEL: debug
commands:
- ./vikunja-unstable-linux-amd64 migrate
# Run the migrations from the binary build in the step before
- ./vikunja migrate
when:
event: [ push, tag, pull_request ]
- name: test
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
commands:
- ./mage-static test:unit
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: test-sqlite
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
VIKUNJA_TESTS_USE_CONFIG: 1
VIKUNJA_DATABASE_TYPE: sqlite
commands:
- ./mage-static test:unit
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: test-mysql
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
VIKUNJA_TESTS_USE_CONFIG: 1
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_HOST: test-mysql-unit
VIKUNJA_DATABASE_USER: root
VIKUNJA_DATABASE_PASSWORD: vikunjatest
VIKUNJA_DATABASE_DATABASE: vikunjatest
commands:
- ./mage-static test:unit
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: test-postgres
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
VIKUNJA_TESTS_USE_CONFIG: 1
VIKUNJA_DATABASE_TYPE: postgres
VIKUNJA_DATABASE_HOST: test-postgres-unit
VIKUNJA_DATABASE_USER: postgres
VIKUNJA_DATABASE_PASSWORD: vikunjatest
VIKUNJA_DATABASE_DATABASE: vikunjatest
VIKUNJA_DATABASE_SSLMODE: disable
commands:
- ./mage-static test:unit
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: integration-test
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
commands:
- ./mage-static test:integration
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: integration-test-sqlite
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
VIKUNJA_TESTS_USE_CONFIG: 1
VIKUNJA_DATABASE_TYPE: sqlite
commands:
- ./mage-static test:integration
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: integration-test-mysql
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
VIKUNJA_TESTS_USE_CONFIG: 1
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_HOST: test-mysql-integration
VIKUNJA_DATABASE_USER: root
VIKUNJA_DATABASE_PASSWORD: vikunjatest
VIKUNJA_DATABASE_DATABASE: vikunjatest
commands:
- ./mage-static test:integration
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
- name: integration-test-postgres
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
VIKUNJA_TESTS_USE_CONFIG: 1
VIKUNJA_DATABASE_TYPE: postgres
VIKUNJA_DATABASE_HOST: test-postgres-integration
VIKUNJA_DATABASE_USER: postgres
VIKUNJA_DATABASE_PASSWORD: vikunjatest
VIKUNJA_DATABASE_DATABASE: vikunjatest
VIKUNJA_DATABASE_SSLMODE: disable
commands:
- ./mage-static test:integration
depends_on: [ fetch-tags, mage ]
when:
event: [ push, tag, pull_request ]
---
########
# Build a release when tagging
########
kind: pipeline
name: release
depends_on:
- testing
workspace:
base: /go
path: src/code.vikunja.io/api
trigger:
ref:
- refs/heads/main
- "refs/tags/**"
steps:
# Needed to get the versions right as they depend on tags
- name: fetch-tags
image: docker:git
commands:
- git fetch --tags
# We're statically compiling the magefile to avoid race condition issues caused by multiple pipeline steps
# compiling the same magefile at the same time. It's also faster if each step does not need to compile it first.
- name: mage
image: vikunja/golang-build:latest
pull: true
environment:
GOPROXY: 'https://goproxy.kolaente.de'
commands:
- mage -compile ./mage-static
when:
event: [ push, tag, pull_request ]
- name: before-static-build
image: techknowlogick/xgo:latest
pull: true
commands:
- export PATH=$PATH:$GOPATH/bin
- go install github.com/magefile/mage
- ./mage-static release:dirs
depends_on: [ fetch-tags, mage ]
- name: static-build-windows
image: techknowlogick/xgo:latest
pull: true
environment:
# This path does not exist. However, when we set the gopath to /go, the build fails. Not sure why.
# Leaving this here until we know how to resolve this properly.
GOPATH: /srv/app
commands:
- export PATH=$PATH:$GOPATH/bin
- go install github.com/magefile/mage
- ./mage-static release:windows
depends_on: [ before-static-build ]
- name: static-build-linux
image: techknowlogick/xgo:latest
pull: true
environment:
# This path does not exist. However, when we set the gopath to /go, the build fails. Not sure why.
# Leaving this here until we know how to resolve this properly.
GOPATH: /srv/app
commands:
- export PATH=$PATH:$GOPATH/bin
- go install github.com/magefile/mage
- ./mage-static release:linux
depends_on: [ before-static-build ]
- name: static-build-darwin
image: techknowlogick/xgo:latest
pull: true
environment:
# This path does not exist. However, when we set the gopath to /go, the build fails. Not sure why.
# Leaving this here until we know how to resolve this properly.
GOPATH: /srv/app
commands:
- export PATH=$PATH:$GOPATH/bin
- go install github.com/magefile/mage
- ./mage-static release:darwin
depends_on: [ before-static-build ]
- name: after-build-compress
image: kolaente/upx
pull: true
depends_on:
- static-build-windows
- static-build-linux
- static-build-darwin
commands:
- ./mage-static release:compress
- name: after-build-static
image: techknowlogick/xgo:latest
pull: true
depends_on:
- after-build-compress
commands:
- go install github.com/magefile/mage
- ./mage-static release:copy
- ./mage-static release:check
- ./mage-static release:os-package
- ./mage-static release:zip
- name: sign-release
image: plugins/gpgsign:1
pull: true
depends_on: [ after-build-static ]
settings:
key:
from_secret: gpg_privkey
passphrase:
from_secret: gpg_password
files:
- dist/zip/*
detach_sign: true
# Push the releases to our pseudo-s3-bucket
- name: release-latest
image: plugins/s3:1
pull: true
settings:
bucket: vikunja-releases
access_key:
from_secret: aws_access_key_id
secret_key:
from_secret: aws_secret_access_key
endpoint: https://s3.fr-par.scw.cloud
region: fr-par
path_style: true
strip_prefix: dist/zip/
source: dist/zip/*
target: /api/unstable/
when:
branch:
- main
event:
- push
depends_on: [ sign-release ]
- name: release-version
image: plugins/s3:1
pull: true
settings:
bucket: vikunja-releases
access_key:
from_secret: aws_access_key_id
secret_key:
from_secret: aws_secret_access_key
endpoint: https://s3.fr-par.scw.cloud
region: fr-par
path_style: true
strip_prefix: dist/zip/
source: dist/zip/*
target: /api/${DRONE_TAG##v}/
when:
event:
- tag
depends_on: [ sign-release ]
# Build os packages and push it to our bucket
- name: build-os-packages
image: goreleaser/nfpm
pull: true
commands:
- apk add git go
- ./mage-static release:packages
- mv dist/os-packages/vikunja*.x86_64.rpm dist/os-packages/vikunja-unstable-x86_64.rpm
- mv dist/os-packages/vikunja*_amd64.deb dist/os-packages/vikunja-unstable-amd64.deb
- mv dist/os-packages/vikunja*_x86_64.apk dist/os-packages/vikunja-unstable-x86_64.apk
depends_on: [ static-build-linux ]
# Push the os releases to our pseudo-s3-bucket
- name: release-os-latest
image: plugins/s3:1
pull: true
settings:
bucket: vikunja-releases
access_key:
from_secret: aws_access_key_id
secret_key:
from_secret: aws_secret_access_key
endpoint: https://s3.fr-par.scw.cloud
region: fr-par
path_style: true
strip_prefix: dist/os-packages/
source: dist/os-packages/*
target: /api/unstable/
when:
branch:
- main
event:
- push
depends_on: [ build-os-packages ]
- name: release-os-version
image: plugins/s3:1
pull: true
settings:
bucket: vikunja-releases
access_key:
from_secret: aws_access_key_id
secret_key:
from_secret: aws_secret_access_key
endpoint: https://s3.fr-par.scw.cloud
region: fr-par
path_style: true
strip_prefix: dist/os-packages/
source: dist/os-packages/*
target: /api/${DRONE_TAG##v}/
when:
event:
- tag
depends_on: [ build-os-packages ]
### Broken, disabled until we figure out how to fix it
# - name: deb-structure
# image: kolaente/reprepro
# pull: true
# environment:
# GPG_PRIVATE_KEY:
# from_secret: gpg_privatekey
# commands:
# - export GPG_TTY=$(tty)
# - gpg -qk
# - echo "use-agent" >> ~/.gnupg/gpg.conf
# - gpgconf --kill gpg-agent
# - echo $GPG_PRIVATE_KEY > ~/frederik.gpg
# - gpg --import ~/frederik.gpg
# - mkdir debian/conf -p
# - cp build/reprepro-dist-conf debian/conf/distributions
# - ./mage-static release:reprepro
# depends_on: [ build-os-packages ]
# Push the releases to our pseudo-s3-bucket
- name: release-deb
image: plugins/s3:1
pull: true
settings:
bucket: vikunja-releases
access_key:
from_secret: aws_access_key_id
secret_key:
from_secret: aws_secret_access_key
endpoint: https://s3.fr-par.scw.cloud
region: fr-par
path_style: true
strip_prefix: debian
source: debian/*/*/*/*/*
target: /deb/
# depends_on: [ deb-structure ]
---
kind: pipeline
name: deploy-docs
workspace:
base: /go
path: src/code.vikunja.io/api
clone:
depth: 50
trigger:
event:
- push
branch:
- main
steps:
- name: theme
image: kolaente/toolbox
pull: true
group: build-static
commands:
- mkdir docs/themes/vikunja -p
- cd docs/themes/vikunja
- wget https://dl.vikunja.io/theme/vikunja-theme.tar.gz
- tar -xzf vikunja-theme.tar.gz
- name: build
image: monachus/hugo:v0.54.0
pull: true
commands:
- cd docs
- hugo
- mv public/docs/* public # Hugo seems to be not capable of setting a different theme for a home page, so we do this ugly hack to fix it.
- name: docker
image: plugins/docker
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/docs
context: docs/
dockerfile: docs/Dockerfile
---
kind: pipeline
type: docker
name: docker-arm-release
depends_on:
- testing
platform:
os: linux
arch: arm64
trigger:
ref:
- refs/heads/main
- "refs/tags/**"
steps:
- name: fetch-tags
image: docker:git
commands:
- git fetch --tags
- name: docker-arm-latest
image: plugins/docker:linux-arm
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/api
tags: latest-linux-arm
depends_on: [ fetch-tags ]
when:
ref:
- refs/heads/main
- name: docker-arm
image: plugins/docker:linux-arm
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/api
auto_tag: true
auto_tag_suffix: linux-arm
depends_on: [ fetch-tags ]
when:
ref:
- "refs/tags/**"
- name: docker-arm64-latest
image: plugins/docker:linux-arm64
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/api
tags: latest-linux-arm64
depends_on: [ fetch-tags ]
when:
ref:
- refs/heads/main
- name: docker-arm64
image: plugins/docker:linux-arm64
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/api
auto_tag: true
auto_tag_suffix: linux-arm64
depends_on: [ fetch-tags ]
when:
ref:
- "refs/tags/**"
---
kind: pipeline
type: docker
name: docker-amd64-release
depends_on:
- testing
platform:
os: linux
arch: amd64
trigger:
ref:
- refs/heads/main
- "refs/tags/**"
steps:
- name: fetch-tags
image: docker:git
commands:
- git fetch --tags
- name: docker-latest
image: plugins/docker:linux-amd64
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/api
tags: latest-linux-amd64
depends_on: [ fetch-tags ]
when:
ref:
- refs/heads/main
- name: docker
image: plugins/docker:linux-amd64
pull: true
settings:
username:
from_secret: docker_username
password:
from_secret: docker_password
repo: vikunja/api
auto_tag: true
auto_tag_suffix: linux-amd64
depends_on: [ fetch-tags ]
when:
ref:
- "refs/tags/**"
---
kind: pipeline
type: docker
name: docker-manifest
trigger:
ref:
- refs/heads/main
- "refs/tags/**"
depends_on:
- docker-amd64-release
- docker-arm-release
steps:
- name: manifest-latest
pull: always
image: plugins/manifest
settings:
tags: latest
ignore_missing: true
spec: docker-manifest-latest.tmpl
password:
from_secret: docker_password
username:
from_secret: docker_username
when:
ref:
- refs/heads/main
- name: manifest
pull: always
image: plugins/manifest
settings:
auto_tag: true
ignore_missing: true
spec: docker-manifest.tmpl
password:
from_secret: docker_password
username:
from_secret: docker_username
when:
ref:
- "refs/tags/**"
---
kind: pipeline
type: docker
name: notify
trigger:
ref:
- refs/heads/main
- "refs/tags/**"
depends_on:
- testing
- release
- deploy-docs
- docker-arm-release
- docker-amd64-release
- docker-manifest
steps:
- name: notify
image: plugins/matrix
settings:
homeserver: https://matrix.org
roomid: WqBDCxzghKcNflkErL:matrix.org
username:
from_secret: matrix_username
password:
from_secret: matrix_password
when:
status:
- success
- failure

1 .envrc
@@ -1 +0,0 @@
use flake

@@ -0,0 +1,11 @@
# Description
# Checklist
* [ ] I added or improved tests
* [ ] I added or improved docs for my feature
* [ ] Swagger (including `mage do-the-swag`)
* [ ] Error codes
* [ ] New config options (including adding them to `config.yml.saml` and running `mage generate-docs`)

3 .github/FUNDING.yml vendored
@@ -1,3 +1,2 @@
github: kolaente
open_collective: vikunja
custom: ["https://vikunja.cloud", "https://www.buymeacoffee.com/kolaente"]
custom: https://www.buymeacoffee.com/kolaente

@@ -1,52 +0,0 @@
name: Bug Report
description: Found something you weren't expecting? Report it here!
labels: kind/bug
body:
- type: markdown
attributes:
value: |
NOTE: If your issue is a security concern, please send an email to security@vikunja.io instead of opening a public issue. [More information about our security policy](https://vikunja.io/contact/#security).
- type: markdown
attributes:
value: |
Please fill out this issue template to report a bug.
1. If you want to propose a new feature, please open a discussion thread in the forum: https://community.vikunja.io
2. Please ask questions or configuration/deploy problems on our [Matrix Room](https://matrix.to/#/#vikunja:matrix.org) or forum (https://community.vikunja.io).
3. Make sure you are using the latest release and
take a moment to check that your issue hasn't been reported before.
4. Please give all relevant information below for bug reports, because
incomplete details will be handled as an invalid report and closed.
- type: textarea
id: description
attributes:
label: Description
description: |
Please provide a description of your issue here, with a URL if you were able to reproduce the issue (see below).
- type: input
id: version
attributes:
label: Vikunja Version
description: Vikunja version (or commit reference) of your instance
validations:
required: true
- type: input
id: browser-version
attributes:
label: Browser and version
description: If your issue is related to a frontend problem, please provide the browser and version you used to reproduce it.
- type: dropdown
id: can-reproduce
attributes:
label: Can you reproduce the bug on the Vikunja demo site?
options:
- "Please select"
- "Yes"
- "No"
validations:
required: true
- type: textarea
id: screenshots
attributes:
label: Screenshots
description: If this issue involves the Web Interface, please provide one or more screenshots

@@ -1,14 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Forum
url: https://community.vikunja.io/
about: Feature Requests, Questions, configuration or deployment problems should be discussed in the forum.
- name: Security-related issues
url: https://vikunja.io/contact/#security
about: For security concerns, please send a mail to security@vikunja.io instead of opening a public issue.
- name: Chat on Matrix
url: https://matrix.to/#/#vikunja:matrix.org
about: Please ask any quick questions here.
- name: Translations
url: https://crowdin.com/project/vikunja
about: Any problems or requests for new languages about translations should be handled in crowdin.

@@ -1,23 +0,0 @@
name: 'Repo Lockdown'
on:
pull_request_target:
types: opened
permissions:
issues: write
pull-requests: write
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/repo-lockdown@v4
with:
pr-comment: 'Hi! Thank you for your contribution.
This repo is only a mirror and unfortunately we can''t accept PRs made here. Please re-submit your changes to [our Gitea instance](https://kolaente.dev/vikunja/vikunja/pulls).
Also check out the [contribution guidelines](https://vikunja.io/docs/development/#pull-requests).
Thank you for your understanding.'

4 .gitignore vendored
@@ -4,8 +4,6 @@
config.yml
config.yaml
!docs/config.yml
!.github/ISSUE_TEMPLATE/config.yml
!.gitea/ISSUE_TEMPLATE/config.yml
docs/themes/
*.db
Run
@@ -27,5 +25,3 @@ vikunja-dump*
vendor/
os-packages/
mage_output_file.go
mage-static
.direnv/

@@ -6,19 +6,17 @@ linters:
enable:
- megacheck
- govet
- goconst
- gocritic
- gocyclo
- goerr113
- goheader
- gofmt
- goimports
- revive
- golint
- misspell
disable:
- scopelint # Obsolete, using exportloopref instead
- durationcheck
- goconst
- musttag
presets:
- bugs
- unused
@@ -37,7 +35,6 @@ issues:
linters:
- gocyclo
- deadcode
- errorlint
- path: pkg/integrations/*
linters:
- gocyclo
@@ -55,6 +52,7 @@ issues:
- path: pkg/migration/*
linters:
- exhaustive
- goconst
- goerr113
- path: pkg/models/task_collection_filter\.go
linters:
@@ -79,32 +77,6 @@ issues:
- path: pkg/routes/api/v1/docs.go
linters:
- goheader
- misspell
- gosmopolitan
- text: "Missed string"
linters:
- goheader
- path: pkg/.*/error.go
linters:
- errorlint
- path: pkg/models/favorites\.go
linters:
- nilerr
- path: pkg/models/project\.go
text: "string `parent_project_id` has 3 occurrences, make it a constant"
- path: pkg/models/events\.go
linters:
- musttag
- path: pkg/models/task_collection.go
text: 'append result not assigned to the same slice'
- path: pkg/modules/migration/ticktick/ticktick_test.go
linters:
- testifylint
- path: pkg/migration/*
text: "parameter 'tx' seems to be unused, consider removing or renaming it as"
linters:
- revive
- path: pkg/models/typesense.go
text: 'structtag: struct field Position repeats json tag "position" also at'
linters:
- govet

@@ -1,14 +0,0 @@
{
"recommendations": [
"codezombiech.gitignore",
"dbaeumer.vscode-eslint",
"editorconfig.editorconfig",
"vue.volar",
"vue.vscode-typescript-vue-plugin",
"lokalise.i18n-ally",
"mgmcdermott.vscode-language-babel",
"mikestead.dotenv",
"Syler.sass-indented",
"zixuanchen.vitest-explorer"
]
}

37 .vscode/settings.json vendored
@@ -1,37 +0,0 @@
{
"go.testEnvVars": {
"VIKUNJA_SERVICE_ROOTPATH": "${workspaceRoot}"
},
"eslint.packageManager": "pnpm",
"editor.formatOnSave": false,
"editor.codeActionsOnSave": {
"source.fixAll": "explicit"
},
"eslint.format.enable": true,
"[javascript]": {
"editor.defaultFormatter": "dbaeumer.vscode-eslint"
},
"[typescript]": {
"editor.defaultFormatter": "dbaeumer.vscode-eslint"
},
// https://eslint.vuejs.org/user-guide/#editor-integrations
"eslint.validate": [
"javascript",
"javascriptreact",
"vue"
],
"volar.completion.preferredTagNameCase": "pascal",
// disable vetur in case it is installed
"vetur.validation.template": false,
// i18n ally
"i18n-ally.localesPaths": [
"src/i18n/lang"
],
"i18n-ally.sortKeys": true,
"i18n-ally.keepFulfilled": true,
"i18n-ally.keystyle": "nested"
}

File diff suppressed because it is too large

@@ -1,3 +0,0 @@
# Contribution Guidelines
Please check out the guidelines on https://vikunja.io/docs/development/

@@ -1,48 +1,51 @@
# syntax=docker/dockerfile:1
FROM --platform=$BUILDPLATFORM node:20.12.2-alpine AS frontendbuilder
WORKDIR /build
##############
# Build stage
FROM golang:1-alpine3.12 AS build-env
ENV PNPM_CACHE_FOLDER .cache/pnpm/
ENV PUPPETEER_SKIP_DOWNLOAD true
ENV CYPRESS_INSTALL_BINARY 0
ARG VIKUNJA_VERSION
ENV TAGS "sqlite"
ENV GO111MODULE=on
COPY frontend/ ./
# Build deps
RUN apk --no-cache add build-base git
RUN corepack enable && \
pnpm install && \
pnpm run build
# Setup repo
COPY . ${GOPATH}/src/code.vikunja.io/api
WORKDIR ${GOPATH}/src/code.vikunja.io/api
FROM --platform=$BUILDPLATFORM techknowlogick/xgo:go-1.21.x AS apibuilder
RUN go install github.com/magefile/mage@latest && \
mv /go/bin/mage /usr/local/go/bin
WORKDIR /go/src/code.vikunja.io/api
COPY . ./
COPY --from=frontendbuilder /build/dist ./frontend/dist
ARG TARGETOS TARGETARCH TARGETVARIANT
ENV GOPROXY https://goproxy.kolaente.de
RUN export PATH=$PATH:$GOPATH/bin && \
mage build:clean && \
mage release:xgo "${TARGETOS}/${TARGETARCH}/${TARGETVARIANT}"
# ┬─┐┬ ┐┌┐┐┌┐┐┬─┐┬─┐
# │┬┘│ │││││││├─ │┬┘
# ┘└┘┘─┘┘└┘┘└┘┴─┘┘└┘
# Checkout version if set
RUN if [ -n "${VIKUNJA_VERSION}" ]; then git checkout "${VIKUNJA_VERSION}"; fi \
&& go install github.com/magefile/mage \
&& mage build:clean build
###################
# The actual image
FROM scratch
# Note: I wanted to use the scratch image here, but unfortunatly the go-sqlite bindings require cgo and
# because of this, the container would not start when I compiled the image without cgo.
FROM alpine:3.12
LABEL maintainer="maintainers@vikunja.io"
WORKDIR /app/vikunja
ENTRYPOINT [ "/app/vikunja/vikunja" ]
EXPOSE 3456
USER 1000
WORKDIR /app/vikunja/
COPY --from=build-env /go/src/code.vikunja.io/api/vikunja .
ENV VIKUNJA_SERVICE_ROOTPATH=/app/vikunja/
ENV VIKUNJA_DATABASE_PATH=/db/vikunja.db
COPY --from=apibuilder /build/vikunja-* vikunja
COPY --from=apibuilder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
# Dynamic permission changing stuff
ENV PUID 1000
ENV PGID 1000
RUN apk --no-cache add shadow && \
addgroup -g ${PGID} vikunja && \
adduser -s /bin/sh -D -G vikunja -u ${PUID} vikunja -h /app/vikunja -H && \
chown vikunja -R /app/vikunja
COPY run.sh /run.sh
# Fix time zone settings not working
RUN apk --no-cache add tzdata
# Files permissions
RUN mkdir /app/vikunja/files && \
chown -R vikunja /app/vikunja/files
VOLUME /app/vikunja/files
CMD ["/run.sh"]
EXPOSE 3456

@@ -1,19 +1,16 @@
<img src="https://vikunja.io/images/vikunja-logo.svg" alt="" style="display: block;width: 50%;margin: 0 auto;" width="50%"/>
[![Build Status](https://drone.kolaente.de/api/badges/vikunja/vikunjaa/status.svg)](https://drone.kolaente.de/vikunja/vikunja)
[![Build Status](https://drone.kolaente.de/api/badges/vikunja/api/status.svg)](https://drone.kolaente.de/vikunja/api)
[![License: AGPL v3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](LICENSE)
[![Download](https://img.shields.io/badge/download-v0.23.0-brightgreen.svg)](https://dl.vikunja.io)
[![Docker Pulls](https://img.shields.io/docker/pulls/vikunja/vikunja.svg)](https://hub.docker.com/r/vikunja/vikunja/)
[![Download](https://img.shields.io/badge/download-v0.17.0-brightgreen.svg)](https://dl.vikunja.io)
[![Docker Pulls](https://img.shields.io/docker/pulls/vikunja/api.svg)](https://hub.docker.com/r/vikunja/api/)
[![Swagger Docs](https://img.shields.io/badge/swagger-docs-brightgreen.svg)](https://try.vikunja.io/api/v1/docs)
[![Go Report Card](https://goreportcard.com/badge/kolaente.dev/vikunja/vikunja)](https://goreportcard.com/report/kolaente.dev/vikunja/vikunja)
[![Go Report Card](https://goreportcard.com/badge/kolaente.dev/vikunja/api)](https://goreportcard.com/report/kolaente.dev/vikunja/api)
# Vikunja
# Vikunja API
> The Todo-app to organize your life.
If Vikunja is useful to you, please consider [buying me a coffee](https://www.buymeacoffee.com/kolaente), [sponsoring me on GitHub](https://github.com/sponsors/kolaente) or buying [a sticker pack](https://vikunja.cloud/stickers).
I'm also offering [a hosted version of Vikunja](https://vikunja.cloud/) if you want a hassle-free solution for yourself or your team.
# Table of contents
* [Security Reports](#security-reports)
@@ -29,7 +26,13 @@ If you find any security-related issues you don't want to disclose publicly, ple
## Features
See [the features page](https://vikunja.io/features/) on our website for a more exhaustive list or
* Create TODO lists with tasks
* Reminder for tasks
* Namespaces: A "group" which bundles multiple lists
* Share lists and namespaces with teams and users with granular permissions
* Plenty of details for tasks
See [the features page](https://vikunja.io/en/features/) on our website for a more exaustive list or
try it on [try.vikunja.io](https://try.vikunja.io)!
## Docs
@@ -46,10 +49,13 @@ All docs can be found on [the Vikunja home page](https://vikunja.io/docs/).
See [the roadmap](https://my.vikunja.cloud/share/QFyzYEmEYfSyQfTOmIRSwLUpkFjboaBqQCnaPmWd/auth) (hosted on Vikunja!) for more!
* [ ] [Mobile apps](https://code.vikunja.io/app) (separate repo) *In Progress*
* [ ] [Webapp](https://code.vikunja.io/frontend) (separate repo) *In Progress*
## Contributing
Please check out the contribuition guidelines on [the website](https://vikunja.io/docs/development/).
Fork -> Push -> Pull-Request. Also see the [dev docs](https://vikunja.io/docs/development/) for more info.
## License
This project is licensed under the AGPLv3 License. See the [LICENSE](LICENSE) file for the full license text.
This project is licensed under the AGPLv3 License. See the [LICENSE](LICENSE) file for the full license text.

@@ -0,0 +1,12 @@
#!/usr/bin/env bash
curl -X POST http://localhost:3456/api/v1/register -H 'Content-Type: application/json' -d '{"username":"demo","password":"demo","email":"demo@vikunja.io"}'
BEARER=`curl -X POST -H 'Content-Type: application/json' -d '{"username": "demo", "password":"demo"}' localhost:3456/api/v1/login | jq -r '.token'`
echo "Bearer: $BEARER"
curl -X POST localhost:3456/api/v1/tokenTest -H "Authorization: Bearer $BEARER"
curl -X PUT localhost:3456/api/v1/namespaces/1/lists -H 'Content-Type: application/json' -H "Authorization: Bearer $BEARER" -d '{"title":"lorem"}'
curl -X PUT localhost:3456/api/v1/lists/1 -H 'Content-Type: application/json' -H "Authorization: Bearer $BEARER" -d '{"text":"lorem"}'
curl -X PUT -H "Authorization: Bearer $BEARER" localhost:3456/api/v1/tasks/1/attachments -F 'files=@/home/konrad/Pictures/Wallpaper/greg-rakozy-_Q4mepyyjMw-unsplash.jpg'

29 REST-Tests/auth.http Normal file
@@ -0,0 +1,29 @@
### Authorization by token, part 1. Retrieve and save token.
POST http://localhost:8080/api/v1/login
Content-Type: application/json
{
"username": "user3",
"password": "1234"
}
> {% client.global.set("auth_token", response.body.token); %}
### Register
POST http://localhost:8080/api/v1/register
Content-Type: application/json
{
"username": "user",
"password": "1234",
"email": "5@knt.li"
}
###
# Token test
POST http://localhost:8080/api/v1/tokenTest
Authorization: Bearer {{auth_token}}
Content-Type: application/json
###

70 REST-Tests/labels.http Normal file
@@ -0,0 +1,70 @@
# Get all labels
GET http://localhost:8080/api/v1/labels
Authorization: Bearer {{auth_token}}
###
# Add a new label
PUT http://localhost:8080/api/v1/labels
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"title": "test5"
}
###
# Delete a label
DELETE http://localhost:8080/api/v1/labels/6
Authorization: Bearer {{auth_token}}
###
# Update a label
POST http://localhost:8080/api/v1/labels/1
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"title": "testschinkenbrot",
"description": "käsebrot"
}
###
# Get one label
GET http://localhost:8080/api/v1/labels/1
Authorization: Bearer {{auth_token}}
###
# Get all labels on a task
GET http://localhost:8080/api/v1/tasks/3565/labels
Authorization: Bearer {{auth_token}}
###
# Add a new label to a task
PUT http://localhost:8080/api/v1/tasks/35236365/labels
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"label_id": 1
}
###
# Delete a label from a task
DELETE http://localhost:8080/api/v1/tasks/3565/labels/1
Authorization: Bearer {{auth_token}}
###
# Add a new label to a task
POST http://localhost:8080/api/v1/tasks/3565/labels/bulk
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"labels": [
{"id": 1},
{"id": 2},
{"id": 3}
]
}
###

177 REST-Tests/lists.http Normal file
@@ -0,0 +1,177 @@
# Get all lists
GET http://localhost:8080/api/v1/namespaces/35/lists
Authorization: Bearer {{auth_token}}
###
# Get one list
GET http://localhost:8080/api/v1/lists/3
Authorization: Bearer {{auth_token}}
###
# Add a new list
PUT http://localhost:8080/api/v1/namespaces/35/lists
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"title": "test"
}
###
# Add a new item
PUT http://localhost:8080/api/v1/lists/1
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"text": "Task",
"description": "Schinken"
}
###
# Delete a task from a list
DELETE http://localhost:8080/api/v1/lists/14
Authorization: Bearer {{auth_token}}
###
# Get all teams who have access to that list
GET http://localhost:8080/api/v1/lists/28/teams
Authorization: Bearer {{auth_token}}
###
# Give a team access to that list
PUT http://localhost:8080/api/v1/lists/1/teams
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"team_id":2, "right": 1}
###
# Update a teams access to that list
POST http://localhost:8080/api/v1/lists/1/teams/2
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"right": 0}
###
# Delete a team from a list
DELETE http://localhost:8080/api/v1/lists/10235/teams/1
Authorization: Bearer {{auth_token}}
###
# Delete a team from a list
DELETE http://localhost:8080/api/v1/lists/10235/teams/1
Authorization: Bearer {{auth_token}}
###
# Get all users who have access to that list
GET http://localhost:8080/api/v1/lists/28/users
Authorization: Bearer {{auth_token}}
###
# Give a user access to that list
PUT http://localhost:8080/api/v1/lists/3/users
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"userID":"user4", "right":1}
###
# Update a users access to that list
POST http://localhost:8080/api/v1/lists/30/users/3
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"right":2}
###
# Delete a user from a list
DELETE http://localhost:8080/api/v1/lists/28/users/3
Authorization: Bearer {{auth_token}}
###
# Get all pending tasks
GET http://localhost:8080/api/v1/tasks/all
Authorization: Bearer {{auth_token}}
###
# Get all pending tasks with priorities
GET http://localhost:8080/api/v1/tasks/all?sort=priorityasc
Authorization: Bearer {{auth_token}}
###
# Get all pending tasks in a range
GET http://localhost:8080/api/v1/tasks/all/dueadateasc/1546784000/1548784000
Authorization: Bearer {{auth_token}}
###
# Get all pending tasks in caldav
GET http://localhost:8080/api/v1/tasks/caldav
#Authorization: Bearer {{auth_token}}
###
# Update a task
POST http://localhost:8080/api/v1/tasks/3565
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"priority": 0
}
###
# Bulk update multiple tasks at once
POST http://localhost:8080/api/v1/tasks/bulk
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"task_ids": [3518,3519,3521],
"text":"bulkupdated"
}
###
# Get all assignees
GET http://localhost:8080/api/v1/tasks/3565/assignees
Authorization: Bearer {{auth_token}}
###
# Add a bunch of assignees
PUT http://localhost:8080/api/v1/tasks/3565/assignees/bulk
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"assignees": [
{"id": 17}
]
}
###
# Get all users who have access to a list
GET http://localhost:8080/api/v1/lists/3/users
Authorization: Bearer {{auth_token}}
###

@@ -0,0 +1,71 @@
# Get all namespaces
GET http://localhost:8080/api/v1/namespaces
Authorization: Bearer {{auth_token}}
###
# Get one namespaces
GET http://localhost:8080/api/v1/namespaces/-1
Authorization: Bearer {{auth_token}}
###
# Get all users who have access to that namespace
GET http://localhost:8080/api/v1/namespaces/12/users
Authorization: Bearer {{auth_token}}
###
# Give a user access to that namespace
PUT http://localhost:8080/api/v1/namespaces/1/users
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"user_id":3, "right": 0}
###
# Update a users access to that namespace
POST http://localhost:8080/api/v1/namespaces/1/users/3
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"right": 2}
###
# Delete a user from a namespace
DELETE http://localhost:8080/api/v1/namespaces/1/users/2
Authorization: Bearer {{auth_token}}
###
# Get all teams who have access to that namespace
GET http://localhost:8080/api/v1/namespaces/1/teams
Authorization: Bearer {{auth_token}}
###
# Give a team access to that namespace
PUT http://localhost:8080/api/v1/namespaces/1/teams
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"team_id":3, "right": 0}
###
# Update a teams access to that namespace
POST http://localhost:8080/api/v1/namespaces/1/teams/1
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{"right": 0}
###
# Delete a team from a namespace
DELETE http://localhost:8080/api/v1/namespaces/1/teams/2
Authorization: Bearer {{auth_token}}
###

29 REST-Tests/teams.http Normal file
@@ -0,0 +1,29 @@
# Get all teams
GET http://localhost:8080/api/v1/teams
Authorization: Bearer {{auth_token}}
###
# Get one team
GET http://localhost:8080/api/v1/teams/28
Authorization: Bearer {{auth_token}}
###
# Add a new member to that team
PUT http://localhost:8080/api/v1/teams/28/members
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"user_id": 2
}
###
# Delete a member from a team
DELETE http://localhost:8080/api/v1/teams/28/members/2
Authorization: Bearer {{auth_token}}
###

53 REST-Tests/users.http Normal file
@@ -0,0 +1,53 @@
# Get all users
GET http://localhost:8080/api/v1/user
Authorization: Bearer {{auth_token}}
######
# Search for a user
GET http://localhost:8080/api/v1/users?s=3
Authorization: Bearer {{auth_token}}
###
## Update password
POST http://localhost:8080/api/v1/user/password
Authorization: Bearer {{auth_token}}
Content-Type: application/json
{
"old_password": "1234",
"new_password": "1234"
}
### Request a password to reset a password
POST http://localhost:8080/api/v1/user/password/token
Content-Type: application/json
Accept: application/json
{
"email": "k@knt.li"
}
### Request a token to reset a password
POST http://localhost:8080/api/v1/user/password/reset
Content-Type: application/json
Accept: application/json
{
"token": "eAsZzakgqARnjzXHqsHqZtSUKuiOhoJjHANhgTxUIDBSalhbtdpAdLeywGXzVDBuRQGNpHdMxoHXhLVSlzpJsFvuoJgMdkhRhkNhaQXfufuZCdtUlerZHSJQLgYMUryHIxIREcmZLtWoZVrYyARkCvkyFhcGtoCwQOEjAOEZMQQuxTVoGYfAqcfNggQnerUcXCiRIgRtkusXSnltomhaeyRwAbrckXFeXxUjslgplSGqSTOqJTYuhrSzAVTwNvuYyvuXLaZoNnJEyeVDWlRydnxfgUQjQZOKwCBRWVQPKpZhlslLUyUAMsRQkHITkruQCjDnOGCCRsSNplbNCEuDmMfpWYHSQAcQIDZtbQWkxzpfmHDMQvvKPPrxEnrTErlvTfKDKICFYPQxXNpNE",
"new_password": "1234"
}
### Confirm a users email address
POST http://localhost:8080/api/v1/user/confirm
Content-Type: application/json
Accept: application/json
{
"token": ""
}
###

@@ -1,59 +0,0 @@
[changelog]
body = """
{% if version %}\
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}\
## [unreleased]
{% endif %}\
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | upper_first }}
{% for commit in commits
| filter(attribute="scope")
| sort(attribute="scope") %}
* *({{commit.scope}})* {{ commit.message | upper_first }}
{%- if commit.breaking %}
{% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
{%- endif -%}
{%- endfor -%}
{%- for commit in commits %}
{%- if commit.scope -%}
{% else -%}
* {{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]({{ commit.id }}))
{% if commit.breaking -%}
{% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
{% endif -%}
{% endif -%}
{% endfor -%}
{% raw %}\n{% endraw %}\
{% endfor %}\n
"""
#{% for group, commits in commits | group_by(attribute="group") %}
# ### {{ group | upper_first }}
# {% for commit in commits %}\
# - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]({{ commit.id }}))
# {% endfor %}\
#{% endfor %}\n
# remove the leading and trailing whitespace from the template
trim = true
[git]
conventional_commits = true
filter_unconventional = false
commit_parsers = [
{ message = ".*(deps).*", group = "Dependencies"},
{ message = "^feat", group = "Features"},
{ message = "^fix", group = "Bug Fixes"},
{ message = "^doc", group = "Documentation"},
{ message = "^perf", group = "Performance"},
{ message = "^refactor", group = "Refactor"},
{ message = "^style", group = "Styling"},
{ message = "^test", group = "Testing"},
{ message = "^chore\\(release\\): prepare for", skip = true},
{ message = "^chore", group = "Miscellaneous Tasks"},
{ body = ".*security", group = "Security"},
{ message = ".*", group = "Other", default_scope = "other"}, # Everything that's not a conventional commit goes into the "Other" category
]

@@ -1,5 +1,5 @@
Vikunja is a to-do list application to facilitate your life.
Copyright 2018-present Vikunja and contributors. All rights reserved.
Copyright 2018-2021 Vikunja and contributors. All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public Licensee as published by

@@ -3,21 +3,10 @@ service:
# Default is a random token which will be generated at each startup of vikunja.
# (This means all already issued tokens will be invalid once you restart vikunja)
JWTSecret: "<jwt-secret>"
# The duration of the issued JWT tokens in seconds.
# The default is 259200 seconds (3 Days).
jwtttl: 259200
# The duration of the "remember me" time in seconds. When the login request is made with
# the long param set, the token returned will be valid for this period.
# The default is 2592000 seconds (30 Days).
jwtttllong: 2592000
# The interface on which to run the webserver
interface: ":3456"
# Path to Unix socket. If set, it will be created and used instead of tcp
unixsocket:
# Permission bits for the Unix socket. Note that octal values must be prefixed by "0o", e.g. 0o660
unixsocketmode:
# The public facing URL where your users can reach Vikunja. Used in emails and for the communication between api and frontend.
publicurl: ""
# The URL of the frontend, used to send password reset emails.
frontendurl: ""
# The base path on the file system where the binary and assets are.
# Vikunja will also look in this path for a config file, so you could provide only this variable to point to a folder
# with a config file which will then be used.
@@ -28,7 +17,7 @@ service:
enablecaldav: true
# Set the motd message, available from the /info endpoint
motd: ""
# Enable sharing of project via a link
# Enable sharing of lists via a link
enablelinksharing: true
# Whether to let new users registering themselves or not
enableregistration: true
@@ -40,54 +29,27 @@ service:
enabletaskcomments: true
# Whether totp is enabled. In most cases you want to leave that enabled.
enabletotp: true
# If not empty, enables logging of crashes and unhandled errors in sentry.
sentrydsn: ''
# If not empty, this will enable `/test/{table}` endpoints which allow to put any content in the database.
# Used to reset the db before frontend tests. Because this is quite a dangerous feature allowing for lots of harm,
# each request made to this endpoint needs to provide an `Authorization: <token>` header with the token from below. <br/>
# each request made to this endpoint neefs to provide an `Authorization: <token>` header with the token from below. <br/>
# **You should never use this unless you know exactly what you're doing**
testingtoken: ''
# If enabled, vikunja will send an email to everyone who is either assigned to a task or created it when a task reminder
# is due.
enableemailreminders: true
# If true, will allow users to request the complete deletion of their account. When using external authentication methods
# it may be required to coordinate with them in order to delete the account. This setting will not affect the cli commands
# for user deletion.
enableuserdeletion: true
# The maximum size clients will be able to request for user avatars.
# If clients request a size bigger than this, it will be changed on the fly.
maxavatarsize: 1024
# If set to true, the frontend will show a big red warning not to use this instance for real data as it will be cleared out.
# You probably don't need to set this value, it was created specifically for usage on [try](https://try.vikunja.io).
demomode: false
# Allow changing the logo and other icons based on various occasions throughout the year.
allowiconchanges: true
# Allow using a custom logo via external URL.
customlogourl: ''
# Enables the public team feature. If enabled, it is possible to configure teams to be public, which makes them
# discoverable when sharing a project, therefore not only showing teams the user is member of.
enablepublicteams: false
sentry:
# If set to true, enables anonymous error tracking of api errors via Sentry. This allows us to gather more
# information about errors in order to debug and fix it.
enabled: false
# Configure the Sentry dsn used for api error tracking. Only used when Sentry is enabled for the api.
dsn: "https://440eedc957d545a795c17bbaf477497c@o1047380.ingest.sentry.io/4504254983634944"
# If set to true, enables anonymous error tracking of frontend errors via Sentry. This allows us to gather more
# information about errors in order to debug and fix it.
frontendenabled: false
# Configure the Sentry dsn used for frontend error tracking. Only used when Sentry is enabled for the frontend.
frontenddsn: "https://85694a2d757547cbbc90cd4b55c5a18d@o1047380.ingest.sentry.io/6024480"
database:
# Database type to use. Supported values are mysql, postgres and sqlite. Vikunja is able to run with MySQL 8.0+, Mariadb 10.2+, PostgreSQL 12+, and sqlite.
# Database type to use. Supported types are mysql, postgres and sqlite.
type: "sqlite"
# Database user which is used to connect to the database.
user: "vikunja"
# Database password
# Databse password
password: ""
# Database host
# Databse host
host: "localhost"
# Database to use
# Databse to use
database: "vikunja"
# When using sqlite, this is the path where to store the data
path: "./vikunja.db"
@@ -95,47 +57,39 @@ database:
maxopenconnections: 100
# Sets the maximum number of idle connections to the db.
maxidleconnections: 50
# The maximum lifetime of a single db connection in milliseconds.
# The maximum lifetime of a single db connection in miliseconds.
maxconnectionlifetime: 10000
# Secure connection mode. Only used with postgres.
# (see https://pkg.go.dev/github.com/lib/pq?tab=doc#hdr-Connection_String_Parameters)
sslmode: disable
# The path to the client cert. Only used with postgres.
sslcert: ""
# The path to the client key. Only used with postgres.
sslkey: ""
# The path to the ca cert. Only used with postgres.
sslrootcert: ""
# Enable SSL/TLS for mysql connections. Options: false, true, skip-verify, preferred
tls: false
typesense:
# Whether to enable the Typesense integration. If true, all tasks will be synced to the configured Typesense
# instance and all search and filtering will run through Typesense instead of only through the database.
# Typesense allows fast fulltext search including fuzzy matching support. It may return different results than
# what you'd get with a database-only search.
cache:
# If cache is enabled or not
enabled: false
# The url to the Typesense instance you want to use. Can be hosted locally or in Typesense Cloud as long
# as Vikunja is able to reach it.
url: ''
# The Typesense API key you want to use.
apikey: ''
# Cache type. Possible values are "keyvalue", "memory" or "redis".
# When choosing "keyvalue" this setting follows the one configured in the "keyvalue" section.
# When choosing "redis" you will need to configure the redis connection seperately.
type: keyvalue
# When using memory this defines the maximum size an element can take
maxelementsize: 1000
redis:
# Whether to enable redis or not
enabled: false
# The host of the redis server including its port.
host: 'localhost:6379'
# The password used to authenticate against the redis server
# The password used to authenicate against the redis server
password: ''
# 0 means default database
db: 0
cors:
# Whether to enable or disable cors headers.
# Note: If you want to put the frontend and the api on separate domains or ports, you will need to enable this.
# Note: If you want to put the frontend and the api on seperate domains or ports, you will need to enable this.
# Otherwise the frontend won't be able to make requests to the api through the browser.
enable: false
enable: true
# A list of origins which may access the api. These need to include the protocol (`http://` or `https://`) and port, if any.
origins:
- "*"
@@ -147,11 +101,8 @@ mailer:
enabled: false
# SMTP Host
host: ""
# SMTP Host port.
# **NOTE:** If you're unable to send mail and the only error you see in the logs is an `EOF`, try setting the port to `25`.
# SMTP Host port
port: 587
# SMTP Auth Type. Can be either `plain`, `login` or `cram-md5`.
authtype: "plain"
# SMTP username
username: "user"
# SMTP password
@@ -182,16 +133,12 @@ log:
databaselevel: "WARNING"
# Whether to log http requests or not. Possible values are stdout, stderr, file or off to disable http logging.
http: "stdout"
# Echo has its own logging which usually is unnecessary, which is why it is disabled by default. Possible values are stdout, stderr, file or off to disable standard logging.
# Echo has its own logging which usually is unnessecary, which is why it is disabled by default. Possible values are stdout, stderr, file or off to disable standard logging.
echo: "off"
# Whether or not to log events. Useful for debugging. Possible values are stdout, stderr, file or off to disable events logging.
events: "off"
events: "stdout"
# The log level for event log messages. Possible values (case-insensitive) are ERROR, INFO, DEBUG.
eventslevel: "info"
# Whether or not to log mail log messages. This will not log mail contents. Possible values are stdout, stderr, file or off to disable mail-related logging.
mail: "off"
# The log level for mail log messages. Possible values (case-insensitive) are ERROR, WARNING, INFO, DEBUG.
maillevel: "info"
ratelimit:
# whether or not to enable the rate limit
@@ -206,10 +153,6 @@ ratelimit:
# Possible values are "keyvalue", "memory" or "redis".
# When choosing "keyvalue" this setting follows the one configured in the "keyvalue" section.
store: keyvalue
# The number of requests a user can make from the same IP to all unauthenticated routes (login, register,
# password confirmation, email verification, password reset request) per minute. This limit cannot be disabled.
# You should only change this if you know what you're doing.
noauthlimit: 10
files:
# The path where files are stored
@@ -219,6 +162,21 @@ files:
maxsize: 20MB
migration:
# These are the settings for the wunderlist migrator
wunderlist:
# Wheter to enable the wunderlist migrator or not
enable: false
# The client id, required for making requests to the wunderlist api
# You need to register your vikunja instance at https://developer.wunderlist.com/apps/new to get this
clientid:
# The client secret, also required for making requests to the wunderlist api
clientsecret:
# The url where clients are redirected after they authorized Vikunja to access their wunderlist stuff.
# This needs to match the url you entered when registering your Vikunja instance at wunderlist.
# This is usually the frontend url where the frontend then makes a request to /migration/wunderlist/migrate
# with the code obtained from the wunderlist api.
# Note that the vikunja frontend expects this to be /migrate/wunderlist
redirecturl:
todoist:
# Wheter to enable the todoist migrator or not
enable: false
@@ -232,9 +190,9 @@ migration:
# This is usually the frontend url where the frontend then makes a request to /migration/todoist/migrate
# with the code obtained from the todoist api.
# Note that the vikunja frontend expects this to be /migrate/todoist
redirecturl: <frontend url>/migrate/todoist
redirecturl:
trello:
# Whether to enable the trello migrator or not
# Wheter to enable the trello migrator or not
enable: false
# The client id, required for making requests to the trello api
# You need to register your vikunja instance at https://trello.com/app-key (log in before you visit that link) to get this
@@ -250,7 +208,7 @@ migration:
enable: false
# The client id, required for making requests to the microsoft graph api
# See https://docs.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app#register-an-application
# for information about how to register your Vikunja instance.
# for information about how to register your vikuinja instance.
clientid:
# The client secret, also required for making requests to the microsoft graph api
clientsecret:
@@ -266,14 +224,14 @@ avatar:
gravatarexpiration: 3600
backgrounds:
# Whether to enable backgrounds for projects at all.
# Whether to enable backgrounds for lists at all.
enabled: true
providers:
upload:
# Whether to enable uploaded project backgrounds
# Whethere to enable uploaded list backgrounds
enabled: true
unsplash:
# Whether to enable setting backgrounds from unsplash as project backgrounds
# Whether to enable setting backgrounds from unsplash as list backgrounds
enabled: false
# You need to create an application for your installation at https://unsplash.com/oauth/applications/new
# and set the access token below.
@@ -293,7 +251,7 @@ legal:
# Key Value Storage settings
# The Key Value Storage is used for different kinds of things like metrics and a few cache systems.
keyvalue:
# The type of the storage backend. Can be either "memory" or "redis". If "redis" is chosen it needs to be configured separately.
# The type of the storage backend. Can be either "memory" or "redis". If "redis" is chosen it needs to be configured seperately.
type: "memory"
auth:
@@ -304,73 +262,34 @@ auth:
enabled: true
# OpenID configuration will allow users to authenticate through a third-party OpenID Connect compatible provider.<br/>
# The provider needs to support the `openid`, `profile` and `email` scopes.<br/>
# **Note:** Some openid providers (like Gitlab) only make the email of the user available through OpenID if they have set it to be publicly visible.
# **Note:** Some openid providers (like gitlab) only make the email of the user available through openid claims if they have set it to be publicly visible.
# If the email is not public in those cases, authenticating will fail.
# **Note 2:** The frontend expects the third party to rediect the user <frontend-url>/auth/openid/<auth key> after authentication. Please make sure to configure the redirect url in your third party auth service accordingly if you're using the default vikunja frontend.
# The frontend will automatically provide the API with the redirect url, composed from the current url where it's hosted.
# If you want to use the desktop client with OpenID, make sure to allow redirects to `127.0.0.1`.
# Take a look at the [default config file](https://kolaente.dev/vikunja/vikunja/src/branch/main/config.yml.sample) for more information about how to configure openid authentication.
# **Note 2:** The frontend expects to be redirected after authentication by the third party
# to <frontend-url>/auth/openid/<auth key>. Please make sure to configure the redirect url with your third party
# auth service accordingy if you're using the default vikunja frontend.
# Take a look at the [default config file](https://kolaente.dev/vikunja/api/src/branch/main/config.yml.sample) for more information about how to configure openid authentication.
openid:
# Enable or disable OpenID Connect authentication
enabled: false
# The url to redirect clients to. Defaults to the configured frontend url. If you're using Vikunja with the official
# frontend, you don't need to change this value.
redirecturl: <frontend url>
# A list of enabled providers
providers:
# The name of the provider as it will appear in the frontend.
- name:
# The auth url to send users to if they want to authenticate using OpenID Connect.
authurl:
# The oidc logouturl that users will be redirected to on logout.
# Leave empty or delete key, if you do not want to be redirected.
logouturl:
# The client ID used to authenticate Vikunja at the OpenID Connect provider.
clientid:
# The client secret used to authenticate Vikunja at the OpenID Connect provider.
clientsecret:
# The scope necessary to use oidc.
# If you want to use the feature to create Vikunja teams and assign users to them via oidc, you have to add the custom "vikunja_scope" and check [openid.md](https://vikunja.io/docs/openid/).
# e.g. scope: openid email profile vikunja_scope
scope: openid email profile
# Prometheus metrics endpoint
metrics:
# If set to true, enables a /metrics endpoint for prometheus to collect metrics about Vikunja. You can query it from `/api/v1/metrics`.
# If set to true, enables a /metrics endpoint for prometheus to collect metrics about Vikunja.
enabled: false
# If set to a non-empty value the /metrics endpoint will require this as a username via basic auth in combination with the password below.
username:
# If set to a non-empty value the /metrics endpoint will require this as a password via basic auth in combination with the username above.
password:
# Provide default settings for new users. When a new user is created, these settings will automatically be set for the user. If you change them in the config file afterwards they will not be changed back for existing users.
defaultsettings:
# The avatar source for the user. Can be `gravatar`, `initials`, `upload` or `marble`. If you set this to `upload` you'll also need to specify `defaultsettings.avatar_file_id`.
avatar_provider: initials
# The id of the file used as avatar.
avatar_file_id: 0
# If set to true users will get task reminders via email.
email_reminders_enabled: false
# If set to true will allow other users to find this user when searching for parts of their name.
discoverable_by_name: false
# If set to true will allow other users to find this user when searching for their exact email.
discoverable_by_email: false
# If set to true will send an email every day with all overdue tasks at a configured time.
overdue_tasks_reminders_enabled: true
# When to send the overdue task reminder email.
overdue_tasks_reminders_time: 9:00
# The id of the default project. Make sure users actually have access to this project when setting this value.
default_project_id: 0
# Start of the week for the user. `0` is Sunday, `1` is Monday and so on.
week_start: 0
# The language of the user interface. Must be an ISO 639-1 language code followed by an ISO 3166-1 alpha-2 country code. Check https://kolaente.dev/vikunja/vikunja/frontend/src/branch/main/src/i18n/lang for a list of possible languages. Will default to the browser language the user uses when signing up.
language: <unset>
# The time zone of each individual user. This will affect when users get reminders and overdue task emails.
timezone: <time zone set at service.timezone>
webhooks:
# Whether to enable support for webhooks
enabled: true
# The timeout in seconds until a webhook request fails when no response has been received.
timoutseconds: 30
# The URL of [a mole instance](https://github.com/frain-dev/mole) to use to proxy outgoing webhook requests. You should use this and configure it appropriately if you're not the only one using your Vikunja instance. More info about why: https://webhooks.fyi/best-practices/webhook-providers#implement-security-on-egress-communication. Must be used in combination with `webhooks.proxypassword` (see below).
proxyurl:
# The proxy password to use when authenticating against the proxy.
proxypassword:
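
To make the openid options above more concrete, here is a minimal sketch of an `auth.openid` block for a single, generic OIDC provider. Every value shown (provider name, URLs, client id and secret) is a placeholder and has to be replaced with what your own provider issues; it is not the configuration of any specific provider.

```yaml
auth:
  openid:
    enabled: true
    providers:
      # Placeholder provider, shown only to illustrate how the keys fit together.
      - name: Example SSO
        # The endpoint of your OIDC provider (placeholder URL).
        authurl: https://sso.example.com/
        # Optional: where to send users after logging out (placeholder URL).
        logouturl: https://sso.example.com/logout
        # Client credentials registered at the provider (placeholders).
        clientid: vikunja
        clientsecret: change-me
        scope: openid email profile
```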

6
desktop/.gitignore vendored
View File

@ -1,6 +0,0 @@
node_modules/
.idea/
frontend/
dist/
*.zip
*.tgz

View File

@ -1,317 +0,0 @@
# Changelog
THIS CHANGELOG ONLY EXISTS FOR HISTORICAL REASONS.
Starting with version 0.23.0, all changes are logged in the CHANGELOG.md in the root of this repository since the repos were merged.
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
All releases can be found on https://code.vikunja.io/desktop/releases.
Releases track the api and frontend version numbers, which is why some version numbers are skipped.
## [0.22.1] - 2024-01-28
### Dependencies
* *(deps)* Update dependency electron-builder to v24.9.1 (#180)
* *(deps)* Update dependency electron to v28 (#183)
* *(deps)* Update dependency electron to v28.1.4 (#185)
## [0.22.0] - 2023-12-19
### Bug Fixes
* Version in release files ([63519c1](63519c15d2d077a7dc5b95a964c77d0019cb555e))
* Add script ([e5b4cc2](e5b4cc23e48010d6d6b414f6ca8c9e170ec8021c))
* Properly replace version ([5977a93](5977a931d0be88d26073121e94053010b7ee93c6))
* Read frontend version from release zip ([51376d0](51376d05dee332f1717fc01c3e7d8b1a61ee7773))
### Dependencies
* *(deps)* Update dependency electron to v25.3.0 (#163)
* *(deps)* Update dependency electron to v25.3.1 (#165)
* *(deps)* Update dependency electron to v25.6.0 (#167)
* *(deps)* Update dependency electron to v25.7.0 (#169)
* *(deps)* Update dependency electron-builder to v24.6.3 (#166)
* *(deps)* Update dependency electron to v25.8.0 (#170)
* *(deps)* Update dependency electron to v26 (#168)
* *(deps)* Update lockfile
* *(deps)* Update dependency electron to v26.2.2 (#174)
* *(deps)* Update dependency electron-builder to v24.6.4 (#171)
* *(deps)* Update dependency electron to v26.2.3 (#175)
* *(deps)* Update dependency electron to v26.3.0 (#176)
* *(deps)* Update dependency electron to v27 (#177)
### Miscellaneous Tasks
* *(ci)* Debug
## [0.21.0] - 2023-07-07
### Dependencies
* *(deps)* Update dependency electron to v22.1.0 (#134)
* *(deps)* Update dependency electron to v22.2.0 (#135)
* *(deps)* Update dependency electron to v23 (#136)
* *(deps)* Update dependency electron to v23.1.0 (#138)
* *(deps)* Update dependency electron to v23.1.1 (#139)
* *(deps)* Update dependency electron to v23.1.2 (#140)
* *(deps)* Update dependency electron to v23.1.3 (#141)
* *(deps)* Update dependency electron to v23.1.4 (#142)
* *(deps)* Update dependency electron to v23.2.0 (#143)
* *(deps)* Update dependency express to v4.18.2 (#144)
* *(deps)* Update dependency electron to v23.2.1 (#145)
* *(deps)* Update dependency electron to v23.2.2 (#146)
* *(deps)* Update dependency electron to v24 (#147)
* *(deps)* Update dependency electron to v24.1.1 (#148)
* *(deps)* Update dependency electron to v24.1.2 (#149)
* *(deps)* Update dependency electron to v24.1.3 (#150)
* *(deps)* Update dependency electron to v24.3.1 (#151)
* *(deps)* Update dependency electron to v24.4.0 (#152)
* *(deps)* Update dependency electron to v25 (#153)
* *(deps)* Update dependency electron to v25.0.1 (#155)
* *(deps)* Update dependency electron to v25.1.0 (#156)
* *(deps)* Update dependency electron to v25.1.1 (#158)
* *(deps)* Update dependency electron to v25.2.0 (#159)
* *(deps)* Update dependency electron-builder to v24 (#157)
* *(deps)* Update dependency connect-history-api-fallback to v2 (#103)
### Miscellaneous Tasks
* Remove sponsor ([c02c5d0](c02c5d009ffcef7984c2feebf7df4f25444b24e1))
## [0.20.3] - 2023-01-24
### Bug Fixes
* Open links in OS default browser ([9915318](99153187d77d5b2311bc2a87864f70b9d2563370))
### Dependencies
* *(deps)* Update dependency electron to v22.0.1 (#131)
* *(deps)* Update dependency electron to v22.0.2 (#132)
* *(deps)* Update dependency electron to v22.0.3 (#133)
## [0.20.2] - 2022-12-18
### Dependencies
* *(deps)* Update dependency electron to v21.3.1 (#128)
* *(deps)* Update dependency electron to v22 (#129)
## [0.20.1] - 2022-11-11
### Dependencies
* *(deps)* Update dependency electron to v21.2.1 (#125)
* *(deps)* Update dependency electron to v21.2.2 (#126)
## [0.20.0] - 2022-10-28
### Dependencies
* *(deps)* Update dependency electron to v21.1.0 (#120)
* *(deps)* Update dependency electron-builder to v23.6.0 (#122)
* *(deps)* Update dependency electron to v21.1.1 (#123)
* *(deps)* Update dependency electron to v21.2.0 (#124)
## [0.19.1] - 2022-08-17
### Dependencies
* *(deps)* Update dependency electron to v20.0.2 (#111)
* *(deps)* Update dependency electron to v20.0.3 (#112)
* *(deps)* Update dependency electron to v20.1.1 (#113)
* *(deps)* Update dependency electron to v20.1.2 (#114)
* *(deps)* Update dependency electron to v20.1.3 (#115)
* *(deps)* Update dependency electron to v20.1.4 (#116)
* *(deps)* Update dependency electron to v20.2.0 (#117)
* *(deps)* Update dependency electron to v21 (#118)
* *(deps)* Update dependency electron to v21.0.1 (#119)
### Features
* Add sponsor to readme (relm) ([5b4d5c7](5b4d5c784b4ea447ea928c8c9ee83a58b51f10f4))
### Miscellaneous Tasks
* Disable mac builds ([0563fb2](0563fb2ee5ae16357cdd9463be33ca3f3977c596))
## [0.19.0] - 2022-08-03
### Dependencies
* *(deps)* Update dependency electron-builder to v22.13.1 (#61)
* *(deps)* Update dependency electron to v15.2.0 (#60)
* *(deps)* Update dependency electron to v15.3.0 (#63)
* *(deps)* Update dependency electron to v15.3.1 (#64)
* *(deps)* Update dependency electron to v15.3.2 (#66)
* *(deps)* Update dependency electron to v16 (#65)
* *(deps)* Update dependency electron to v16.0.1 (#67)
* *(deps)* Update dependency electron-builder to v22.14.5 (#68)
* *(deps)* Update dependency electron to v16.0.2 (#69)
* *(deps)* Update dependency electron to v16.0.3 (#71)
* *(deps)* Update dependency electron to v16.0.4 (#72)
* *(deps)* Update dependency electron to v16.0.5 (#73)
* *(deps)* Update dependency electron to v16.0.6 (#74)
* *(deps)* Update dependency electron to v16.0.7 (#75)
* *(deps)* Update dependency electron to v16.0.8 (#76)
* *(deps)* Update dependency electron to v17 (#77)
* *(deps)* Update dependency electron-builder to v22.14.13 (#78)
* *(deps)* Update dependency electron to v17.0.1 (#79)
* *(deps)* Update dependency electron to v17.1.0 (#80)
* *(deps)* Update dependency electron to v17.1.1 (#81)
* *(deps)* Update dependency electron to v17.1.2 (#82)
* *(deps)* Update dependency electron to v17.2.0 (#83)
* *(deps)* Update dependency electron to v17.3.0 (#84)
* *(deps)* Update dependency electron to v18 (#85)
* *(deps)* Update dependency electron to v18.0.1 (#86)
* *(deps)* Update dependency electron to v18.0.2 (#87)
* *(deps)* Update dependency electron to v18.0.3 (#88)
* *(deps)* Update dependency electron-builder to v23 (#89)
* *(deps)* Update dependency electron to v18.0.4 (#90)
* *(deps)* Update dependency electron to v18.1.0 (#91)
* *(deps)* Update dependency electron to v18.2.0 (#92)
* *(deps)* Update dependency electron to v18.2.2 (#93)
* *(deps)* Update dependency electron to v18.2.3 (#94)
* *(deps)* Update dependency electron to v18.2.4 (#95)
* *(deps)* Update dependency electron to v18.3.1 (#96)
* *(deps)* Update dependency electron to v19 (#97)
* *(deps)* Update dependency electron to v19.0.2 (#98)
* *(deps)* Update dependency electron to v19.0.3 (#99)
* *(deps)* Update dependency electron to v19.0.4 (#100)
* *(deps)* Update dependency electron to v19.0.6 (#101)
* *(deps)* Update dependency electron-builder to v23.1.0 (#102)
* *(deps)* Update dependency electron to v19.0.7 (#104)
* *(deps)* Update dependency electron to v19.0.8 (#105)
* *(deps)* Update dependency electron to v19.0.9 (#106)
* *(deps)* Update dependency electron to v19.0.10 (#107)
* *(deps)* Update dependency electron-builder to v23.3.3 (#108)
* *(deps)* Update dependency electron to v20 (#109)
* *(deps)* Update dependency electron to v20.0.1 (#110)
### Miscellaneous Tasks
* *(ci)* Use latest s3 plugin
* *(ci)* Sign drone config
### Other
* *(other)* Update dependency electron to v14.0.1 (#58)
* *(other)* Update dependency electron to v15 (#59)
## [0.18.0] - 2021-09-05
### Added
* Add drone pipeline for PR
* Enable mac builds
### Changed
* Cleanup
* Fix sed for macos
* Install yarn on mac
* Only upload .dmg files for macos builds
* Sign drone config
### Dependency Updates
* Update dependency electron-builder to v22.11.7 (#45)
* Update dependency electron to v13.0.1 (#41)
* Update dependency electron to v13.1.0 (#42)
* Update dependency electron to v13.1.1 (#43)
* Update dependency electron to v13.1.2 (#44)
* Update dependency electron to v13.1.3 (#46)
* Update dependency electron to v13.1.4 (#47)
* Update dependency electron to v13.1.5 (#48)
* Update dependency electron to v13.1.6 (#49)
* Update dependency electron to v13.1.7 (#50)
* Update dependency electron to v13.1.8 (#51)
* Update dependency electron to v13.1.9 (#52)
* Update dependency electron to v13.2.0 (#53)
* Update dependency electron to v13.2.1 (#54)
* Update dependency electron to v13.2.2 (#55)
* Update dependency electron to v13.2.3 (#56)
* Update dependency electron to v13 (#39)
* Update dependency electron to v14 (#57)
## [0.17.0] - 2021-05-20
For a list of changes in this release, see [the frontend changelog](https://kolaente.dev/vikunja/frontend/releases/tag/v0.17.0).
### Added
* Add darwin release pipeline
* Add pipeline type
### Changed
* Change release target path for unstable releases
* Change version to download to unstable
* Disable the mac builds for now
* Move release steps in one pipeline step for macos
* Switch main branch to main
* Switch to wine-mono for building
### Fixed
* Fix missing application icon on Linux. (#19)
* Fix version in package.json
### Dependency Updates
* Update dependency electron-builder to v22.10.5 (#23)
* Update dependency electron-builder to v22.11.1 (#31)
* Update dependency electron-builder to v22.11.2 (#33)
* Update dependency electron-builder to v22.11.3 (#34)
* Update dependency electron-builder to v22.11.4 (#35)
* Update dependency electron-builder to v22.11.5 (#37)
* Update dependency electron to v11.2.0 (#12)
* Update dependency electron to v11.2.1 (#14)
* Update dependency electron to v11.2.2 (#20)
* Update dependency electron to v11.2.3 (#21)
* Update dependency electron to v11.3.0 (#22)
* Update dependency electron to v12.0.1 (#25)
* Update dependency electron to v12.0.2 (#26)
* Update dependency electron to v12.0.3 (#27)
* Update dependency electron to v12.0.4 (#28)
* Update dependency electron to v12.0.5 (#29)
* Update dependency electron to v12.0.6 (#30)
* Update dependency electron to v12.0.7 (#32)
* Update dependency electron to v12.0.8 (#36)
* Update dependency electron to v12.0.9 (#38)
* Update dependency electron to v12 (#24)
## [0.16.0] - 2021-01-10
For a list of changes in this release, see [the frontend changelog](https://kolaente.dev/vikunja/frontend/releases/tag/v0.16.0).
### Added
* Add yarn cache to drone
* Configure Renovate (#1)
### Changed
* Change license to GPLv3
* Pin dependencies (#2)
* Update dependency electron to v10.1.5 (#3)
* Update dependency electron to v11.0.1 (#5)
* Update dependency electron to v11.0.2 (#6)
* Update dependency electron to v11.0.3 (#7)
* Update dependency electron to v11.0.4 (#8)
* Update dependency electron to v11.1.0 (#9)
* Update dependency electron to v11.1.1 (#10)
* Update dependency electron to v11 (#4)
## [0.15.0] - 2020-10-19
First initial release.
For a list of changes in this release, see [the frontend changelog](https://kolaente.dev/vikunja/frontend/releases/tag/v0.15.0).

View File

@ -1,675 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

View File

@ -1,27 +0,0 @@
# Vikunja desktop
[![Build Status](https://drone.kolaente.de/api/badges/vikunja/desktop/status.svg)](https://drone.kolaente.de/vikunja/desktop)
[![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](LICENSE)
[![Download](https://img.shields.io/badge/download-v0.22.1-brightgreen.svg)](https://dl.vikunja.io)
The Vikunja frontend all repackaged as an electron app to run as a desktop app!
## Dev
As this repo does not contain any code, only a thin wrapper around electron, you will need to do this to get the
actual frontend bundle and build the app:
```bash
rm -rf frontend vikunja-frontend-master.zip
wget https://dl.vikunja.io/frontend/vikunja-frontend-master.zip
unzip vikunja-frontend-master.zip -d frontend
sed -i 's/\/api\/v1//g' frontend/index.html # Make sure to trigger the "enter the Vikunja url" prompt
```
## Building for release
1. Run the snippet from above, but with a valid frontend version instead of `master`
2. Change the version in `package.json` (that's the one that will be used by electron-builder)
3. `yarn install`
4. `yarn dist --linux --windows`

Binary file not shown.

Binary file not shown.


View File

@ -1,9 +0,0 @@
#!/bin/sh
set -xe
frontend_version=$(git describe --tags --always --abbrev=10)
sed -i "s/\${version}/$frontend_version/g" package.json
sed -i "s/\"version\": \".*\"/\"version\": \"$frontend_version\"/" package.json

View File

@ -1,59 +0,0 @@
[changelog]
body = """
{% if version %}\
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}\
## [unreleased]
{% endif %}\
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | upper_first }}
{% for commit in commits
| filter(attribute="scope")
| sort(attribute="scope") %}
* *({{commit.scope}})* {{ commit.message | upper_first }}
{%- if commit.breaking %}
{% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
{%- endif -%}
{%- endfor -%}
{%- for commit in commits %}
{%- if commit.scope -%}
{% else -%}
* {{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]({{ commit.id }}))
{% if commit.breaking -%}
{% raw %} {% endraw %}- **BREAKING**: {{commit.breaking_description}}
{% endif -%}
{% endif -%}
{% endfor -%}
{% raw %}\n{% endraw %}\
{% endfor %}\n
"""
#{% for group, commits in commits | group_by(attribute="group") %}
# ### {{ group | upper_first }}
# {% for commit in commits %}\
# - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}]({{ commit.id }}))
# {% endfor %}\
#{% endfor %}\n
# remove the leading and trailing whitespace from the template
trim = true
[git]
conventional_commits = true
filter_unconventional = false
commit_parsers = [
{ message = ".*(deps).*", group = "Dependencies"},
{ message = "^feat", group = "Features"},
{ message = "^fix", group = "Bug Fixes"},
{ message = "^doc", group = "Documentation"},
{ message = "^perf", group = "Performance"},
{ message = "^refactor", group = "Refactor"},
{ message = "^style", group = "Styling"},
{ message = "^test", group = "Testing"},
{ message = "^chore\\(release\\): prepare for", skip = true},
{ message = "^chore", group = "Miscellaneous Tasks"},
{ body = ".*security", group = "Security"},
{ message = ".*", group = "Other", default_scope = "other"}, # Everything that's not a conventional commit goes into the "Other" category
]

View File

@ -1,68 +0,0 @@
const {app, BrowserWindow, shell} = require('electron')
const path = require('path')
const express = require('express')
const eApp = express()
const portInUse = require('./portInUse.js')
const frontendPath = 'frontend/'
function createWindow() {
// Create the browser window.
const mainWindow = new BrowserWindow({
width: 1680,
height: 960,
webPreferences: {
nodeIntegration: true,
}
})
// Open external links in the browser
mainWindow.webContents.setWindowOpenHandler(({ url }) => {
shell.openExternal(url);
return { action: 'deny' };
});
// Hide the toolbar
mainWindow.setMenuBarVisibility(false)
// We try to use the same port every time and only use a different one if that does not succeed.
let port = 45735
portInUse(port, used => {
if(used) {
console.log(`Port ${port} already used, switching to a random one`)
port = 0 // This lets express choose a random port
}
// Start a local express server to serve static files
eApp.use(express.static(path.join(__dirname, frontendPath)))
// Handle urls set by the frontend
eApp.get('*', (request, response, next) => {
response.sendFile(`${__dirname}/${frontendPath}index.html`);
})
const server = eApp.listen(port, '127.0.0.1', () => {
console.log(`Server started on port ${server.address().port}`)
mainWindow.loadURL(`http://127.0.0.1:${server.address().port}`)
})
})
}
// This method will be called when Electron has finished
// initialization and is ready to create browser windows.
// Some APIs can only be used after this event occurs.
app.whenReady().then(() => {
createWindow()
app.on('activate', function () {
// On macOS it's common to re-create a window in the app when the
// dock icon is clicked and there are no other windows open.
if (BrowserWindow.getAllWindows().length === 0) createWindow()
})
})
// Quit when all windows are closed, except on macOS. There, it's common
// for applications and their menu bar to stay active until the user quits
// explicitly with Cmd + Q.
app.on('window-all-closed', () => {
if (process.platform !== 'darwin') app.quit()
})

View File

@ -1,61 +0,0 @@
{
"name": "vikunja-desktop",
"version": "0.21.0",
"description": "Vikunja's frontend as a standalone desktop application.",
"main": "main.js",
"repository": "https://code.vikunja.io/desktop",
"license": "GPL-3.0-or-later",
"author": {
"email": "maintainers@vikunja.io",
"name": "Vikunja Team"
},
"homepage": "https://vikunja.io",
"scripts": {
"start": "electron .",
"pack": "electron-builder --dir",
"dist": "electron-builder"
},
"build": {
"appId": "io.vikunja.desktop",
"productName": "Vikunja Desktop",
"artifactName": "${productName}-${version}.${ext}",
"icon": "build/icon.icns",
"linux": {
"target": [
"deb",
"AppImage",
"snap",
"pacman",
"apk",
"freebsd",
"rpm",
"zip",
"tar.gz"
],
"category": "Productivity"
},
"win": {
"target": [
"nsis",
"portable",
"msi",
"zip"
]
},
"mac": {
"category": "public.app-category.productivity",
"target": [
"dmg",
"zip"
]
}
},
"devDependencies": {
"electron": "29.3.1",
"electron-builder": "24.13.3"
},
"dependencies": {
"connect-history-api-fallback": "2.0.0",
"express": "4.19.2"
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,18 +0,0 @@
const net = require('net');
module.exports = function(port, callback) {
const server = net.createServer(function(socket) {
socket.write('Echo server\r\n');
socket.pipe(socket);
})
server.listen(port, '127.0.0.1');
server.on('error', function (e) {
callback(true)
})
server.on('listening', function (e) {
server.close()
callback(false)
})
}

View File

@ -0,0 +1,17 @@
image: vikunja/api:latest
manifests:
-
image: vikunja/api:latest-linux-amd64
platform:
architecture: amd64
os: linux
-
image: vikunja/api:latest-linux-arm64
platform:
architecture: arm64
os: linux
-
image: vikunja/api:latest-linux-arm
platform:
architecture: arm
os: linux

23
docker-manifest.tmpl Normal file
View File

@ -0,0 +1,23 @@
image: vikunja/api:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
- {{this}}
{{/each}}
{{/if}}
manifests:
-
image: vikunja/api:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
platform:
architecture: amd64
os: linux
-
image: vikunja/api:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64
platform:
architecture: arm64
os: linux
-
image: vikunja/api:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm
platform:
architecture: arm
os: linux

View File

View File

@ -2,7 +2,7 @@ baseurl: https://vikunja.io/docs/
title: Vikunja
theme: vikunja
enableRobotsTXT: true
canonifyURLs: false
canonifyURLs: true
pygmentsUseClasses: true
@ -28,17 +28,14 @@ markup:
menu:
page:
- name: Home
url: https://vikunja.io/
url: https://vikunja.io/en/
weight: 10
- name: Features
url: https://vikunja.io/features
url: https://vikunja.io/en/features
weight: 20
- name: Download
url: https://vikunja.io/download
url: https://vikunja.io/en/download
weight: 30
- name: Blog
url: https://vikunja.io/blog/
weight: 35
- name: Docs
url: https://vikunja.io/docs
weight: 40
@ -48,16 +45,3 @@ menu:
- name: Community
url: https://community.vikunja.io/
weight: 60
- name: Stickers
url: https://vikunja.cloud/stickers?utm_source=io&utm_medium=io&utm_campaign=menu
weight: 65
- name: Get it Hosted
url: https://vikunja.cloud/?utm_source=io&utm_medium=io&utm_campaign=menu
weight: 70
sidebar:
- name: setup
weight: 10
- name: usage
weight: 20
- name: development
weight: 30

View File

@ -22,4 +22,4 @@ and [available configuration options]({{< ref "./setup/config.md">}}).
## Developing
If you want to start contributing to Vikunja, take a look at [the development docs]({{< ref "./development/development.md">}}).

View File

@ -1,6 +1,6 @@
---
date: "2019-03-31:00:00+01:00"
title: "Cli Commands"
title: "Adding new cli commands"
draft: false
type: "doc"
menu:
@ -12,14 +12,14 @@ menu:
All cli-related functions are located in `pkg/cmd`.
Each cli command usually calls a function in another package.
For example, the `vikunja migrate` command calls `migration.Migrate()`.
Vikunja uses the amazing [cobra](https://github.com/spf13/cobra) library for its cli.
Please refer to its documentation for information about how to use flags etc.
To add a new cli command, add something like the following:
```go
func init() {
rootCmd.AddCommand(myCmd)
}
@ -31,4 +31,4 @@ var myCmd = &cobra.Command{
// Call other functions
},
}
```

View File

@ -1,41 +0,0 @@
---
date: "2019-02-12:00:00+02:00"
title: "Configuration Options"
draft: false
type: "doc"
menu:
sidebar:
parent: "development"
---
# Configuration options
All configuration variables are declared in the `config` package.
It uses [viper](https://github.com/spf13/viper) under the hood to handle setting defaults and parsing config files.
Viper handles parsing all different configuration sources.
## Adding new config options
To make handling configuration parameters a bit easier, we introduced a `Key` string type in the `config` package which
you can call directly to get a config value.
To add a new config option, you should add a new key const to `pkg/config/config.go` and possibly a default value.
Default values should always enable the feature to work or turn it off completely if it always needs
additional configuration.
Make sure to also add the new config option to the default config file (`config.yml.sample` at the root of the repository)
with an explanatory comment to make sure it is well documented.
Then run `mage generate-docs` to generate the configuration docs from the sample file.
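A minimal sketch of what that can look like (the key name and default value here are made up for illustration, and the exact helper used to register the default may differ in the real `config` package):

```go
// A hypothetical new option in pkg/config/config.go
const MyFeatureEnabled Key = `myfeature.enabled`

// Register a default in the package's default setup so the feature works
// out of the box without additional configuration.
func init() {
	MyFeatureEnabled.setDefault(false)
}
```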
## Getting Configuration Values
To retrieve a configured value call the key with a getter for the type you need.
For example:
```go
if config.CacheEnabled.GetBool() {
// Do something with enabled caches
}
```
Take a look at the methods declared on the type to see what's available.

View File

@ -1,33 +0,0 @@
---
title: "Cron Tasks"
date: 2021-07-13T23:21:52+02:00
draft: false
menu:
sidebar:
parent: "development"
---
# How to add a cron job task
Cron jobs are tasks which run on a predefined schedule.
Vikunja uses these through a light wrapper package around the excellent [github.com/robfig/cron](https://github.com/robfig/cron) package.
The package exposes a `cron.Schedule` method with two arguments: the first one defines the schedule on which the cron task should run, the second one is the actual function to run on that schedule. You would then create a new function in your package to register the actual cron task.
A basic function to register a cron task looks like this:
```go
func RegisterSomeCronTask() {
	err := cron.Schedule("0 * * * *", func() {
		// Do something every hour
	})
	if err != nil {
		// handle the registration error, e.g. log it
	}
}
```
Call the register method in the `FullInit()` method of the `init` package to actually register it.
## Schedule Syntax
The cron syntax is the same one you may know from unix systems.
It is described in detail [here](https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format).

View File

@ -1,38 +0,0 @@
---
date: "2019-02-12:00:00+02:00"
title: "Database"
draft: false
type: "doc"
menu:
sidebar:
parent: "development"
---
# Database
Vikunja uses [xorm](https://xorm.io/) as an abstraction layer to handle the database connection.
Please refer to [their](https://xorm.io/docs/) documentation on how to exactly use it.
{{< table_of_contents >}}
## Using the database
When using the common web handlers, you get an `xorm.Session` to do database manipulations.
In other packages, use the `db.NewSession()` method to get a new database session.
## Adding new database tables
To add a new table to the database, create the struct and [add a migration for it]({{< ref "db-migrations.md" >}}).
To learn more about how to configure your struct to create "good" tables, refer to [the xorm documentation](https://xorm.io/docs/).
In most cases you will also need to implement the `TableName() string` method on the new struct to make sure the table name matches the rest of the tables - plural.
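To illustrate both points, here is a small hypothetical struct; the name, fields and tags are made up and only follow the style of the other structs in the `models` package:

```go
// Bookmark is a purely illustrative example struct.
type Bookmark struct {
	ID    int64  `xorm:"bigint autoincr not null unique pk" json:"id"`
	Title string `xorm:"varchar(250) not null" json:"title"`
}

// TableName makes xorm use a plural table name, matching the rest of the tables.
func (*Bookmark) TableName() string {
	return "bookmarks"
}
```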
## Adding data to test fixtures
Adding data for test fixtures can be done via `yaml` files in `pkg/models/fixtures`.
The name of the yaml file should match the table name in the database.
Adding values to it is done via array definition inside it.
**Note**: Table and column names need to be in snake_case as that's what is used internally in the database and for mapping values from the database to xorm so your structs can use it.

View File

@ -1,6 +1,6 @@
---
date: "2019-03-29:00:00+02:00"
title: "Database Migrations"
title: "Database migrations"
draft: false
type: "doc"
menu:
@ -25,11 +25,11 @@ All migrations are stored in `pkg/migrations` and files should have the same nam
Each migration should have a function to apply and roll it back, as well as a numeric id (the datetime)
and a more in-depth description of what the migration actually does.
To easily get a new id, run the following on any unix system:
```
date +%Y%m%d%H%M%S
```
New migrations should be added via the `init()` function to the `migrations` variable.
All migrations are sorted before being executed, since `init()` does not guarantee the order.
@ -37,14 +37,9 @@ All migrations are sorted before being executed, since `init()` does not guarant
When you're adding a new struct, you also need to add it to the `models.GetTables()` function
to ensure it will be created on new installations.
### Generating a new migration stub
You can easily generate a pre-filled migration stub by running `mage dev:make-migration`.
It will ask you for a table name and generate an empty migration similar to the example shown below.
### Example
```go
package migration
import (
@ -73,6 +68,6 @@ func init() {
},
})
}
```
You should always copy the changed parts of the struct you're changing when adding migrations.
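For orientation, here is a minimal sketch of what a complete migration entry typically looks like. The id, struct and field names are made up, and the exact `Migration` fields depend on the xormigrate version in use, so treat this as an assumption rather than the canonical form:

```go
package migration

import (
	"src.techknowlogick.com/xormigrate"
	"xorm.io/xorm"
)

// users20211231120000 is a hypothetical partial copy of the users struct,
// containing only the column this migration adds.
type users20211231120000 struct {
	DefaultListID int64 `xorm:"bigint null"`
}

func (users20211231120000) TableName() string {
	return "users"
}

func init() {
	// migrations is the package-level slice mentioned above
	migrations = append(migrations, &xormigrate.Migration{
		ID:          "20211231120000",
		Description: "Add a default list id to users",
		Migrate: func(tx *xorm.Engine) error {
			return tx.Sync2(users20211231120000{})
		},
		Rollback: func(tx *xorm.Engine) error {
			return nil
		},
	})
}
```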

View File

@ -1,5 +1,5 @@
---
date: "2022-09-21:00:00+02:00"
date: "2019-02-12:00:00+02:00"
title: "Development"
toc: true
draft: false
@ -12,86 +12,56 @@ menu:
# Development
{{< table_of_contents >}}
## General
We use go modules to vendor libraries for Vikunja, so you'll need at least go `1.11` to use these.
If you don't intend to add new dependencies, go `1.9` and above should be fine.
To contribute to Vikunja, fork the project and work on the main branch.
Once you feel like your changes are ready, open a PR in the respective repo [on our Gitea instance](https://kolaente.dev/vikunja).
We cannot accept PRs on mirror sites.
A maintainer will take a look and give you feedback. Once everyone is happy, the PR gets merged and released.
If you plan to do a bigger change, it is better to open an issue for discussion first.
The main repo is [`vikunja/vikunja`](https://kolaente.dev/vikunja/vikunja), it contains all code for the api, frontend and desktop applications.
## API
You'll need at least Go 1.21 to build Vikunja's api.
A lot of developing tasks are automated using a Magefile, so make sure to [take a look at it]({{< ref "mage.md">}}).
Make sure to check the other doc articles for specific development tasks like [testing]({{< ref "test.md">}}),
[database migrations]({{< ref "db-migrations.md" >}}) and the [project structure]({{< ref "structure.md" >}}).
{{< table_of_contents >}}
## Frontend requirements
## Libraries
The code for the frontend is located in the `frontend` sub folder of the main repo.
More instructions can be found in the repo's README.
We keep all libraries used for Vikunja around in the `vendor/` folder to still be able to build the project even if
some maintainers take their libraries down like [it happened in the past](https://github.com/jteeuwen/go-bindata/issues/5).
You need to have [pnpm](https://pnpm.io/) and Node.JS in version 20 or higher installed.
## Tests
## Pull Requests
See [testing]({{< ref "test.md">}}).
All Pull Requests must be made [on our Gitea instance](https://kolaente.dev/vikunja).
We cannot accept PRs on mirror sites.
#### Development using go modules
Please try to make your pull request easy to review.
For that, please read the [*Best Practices for Faster Reviews*](https://github.com/kubernetes/community/blob/261cb0fd089b64002c91e8eddceebf032462ccd6/contributors/guide/pull-requests.md#best-practices-for-faster-reviews) guide.
It has lots of useful tips for any project you may want to contribute to.
Some of the key points:
If you're able to use go modules, you can clone the project wherever you want to and work from there.
- Make small pull requests.
The smaller, the faster to review and the more likely it will be merged soon.
- Don't make changes unrelated to your PR.
Maybe there are typos on some comments, maybe refactoring would be welcome on a function…
but if that is not related to your PR, please make *another* PR for that.
- Split big pull requests into multiple small ones.
An incremental change will be faster to review than a huge PR.
- Allow edits by maintainers. This way, the maintainers will take care of merging the PR later on instead of you.
#### Development-setup without go modules
### PR title and summary
Some internal packages are referenced using their respective package URL. This can become problematic.
To “trick” the Go tool into thinking this is a clone from the official repository, download the source code
into `$GOPATH/src/code.vikunja.io/api`. Fork the Vikunja repository; it should then be possible to switch the source directory on the command line.
In the PR title, describe the problem you are fixing, not how you are fixing it.
Use the first comment as a summary of your PR.
In the PR summary, you can describe exactly how you are fixing this problem.
Keep this summary up-to-date as the PR evolves.
{{< highlight bash >}}
cd $GOPATH/src/code.vikunja.io/api
{{< /highlight >}}
If your PR changes the UI, you must add **after** screenshots in the PR summary.
If your PR closes an issue, you must note that in a way that both GitHub and Gitea understand, i.e. by appending a paragraph like
To be able to create pull requests, the forked repository should be added as a remote to the Vikunja sources, otherwise changes can't be pushed.
```text
Fixes/Closes/Resolves #<ISSUE_NR_X>.
Fixes/Closes/Resolves #<ISSUE_NR_Y>.
```
{{< highlight bash >}}
git remote rename origin upstream
git remote add origin git@git.kolaente.de:<USERNAME>/api.git
git fetch --all --prune
{{< /highlight >}}
to your summary.
Each issue that will be closed must stand on a separate line.
This should provide a working development environment for Vikunja. Take a look at the Magefile to get an overview of
the available tasks. The most common tasks should be `mage test:unit` which will start our test environment and `mage build:build`
which will build a vikunja binary into the working directory. Writing test cases is not mandatory to contribute, but it
is highly encouraged and helps developers sleep at night.
If your PR is related to a discussion in the forum, you must add a link to the forum discussion.
That's it! You are ready to hack on Vikunja. Test changes, push them to the repository, and open a pull request.
### Git flow
## Static assets
The `main` branch is the latest and bleeding edge branch with all changes. Unstable releases are automatically created from this branch.
New Pull-Requests should be made against the `main` branch.
Each Vikunja release contains all static assets directly compiled into the binary.
To prevent this during development, use the `dev` tag when developing.
A release gets tagged from the main branch with the version name as tag name.
Backports and point-releases should go to a `release/version` branch, based on the tag they are building on top of.
## Conventional Commits
We're using [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) because they simplify generating release notes a lot.
It is not required to use them when creating a PR, but appreciated.
See the [mage docs](mage.md#statically-compile-all-templates-into-the-binary) about how to compile with static assets for a release.

View File

@ -28,7 +28,7 @@ This document explains how events and listeners work in Vikunja, how to use them
Each event has to implement this interface:
```go
type Event interface {
Name() string
}
@ -42,7 +42,7 @@ You then get the event with all its data back in the listener, see below.
#### Naming Convention
Event names should roughly have the entity they're dealing with on the left and the action on the right of the name, separated by `.`.
There's no limit to how "deep" or specific an event name can be.
The name should have the most general concept it's describing at the left, getting more specific on the right of it.
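As a purely hypothetical illustration of that scheme (the concrete event and its name may differ in the real code base), an event about comments being created on tasks could report its name like this:

```go
func (t *TaskCommentCreatedEvent) Name() string {
	return "task.comment.created"
}
```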
@ -75,7 +75,7 @@ To dispatch an event, simply call the `events.Dispatch` method and pass in the e
The `TaskCreatedEvent` is declared in the `pkg/models/events.go` file as follows:
```go
// TaskCreatedEvent represents an event where a task has been created
type TaskCreatedEvent struct {
Task *Task
@ -90,7 +90,7 @@ func (t *TaskCreatedEvent) Name() string {
It is dispatched in the `createTask` function of the `models` package:
```go
func createTask(s *xorm.Session, t *Task, a web.Auth, updateAssignees bool) (err error) {
// ...
@ -104,7 +104,7 @@ func createTask(s *xorm.Session, t *Task, a web.Auth, updateAssignees bool) (err
}
```
As you can see, the current task and doer are injected into it.
### Special Events
@ -122,7 +122,7 @@ A single event can have multiple listeners who are independent of each other.
All listeners must implement this interface:
```go
// Listener represents something that listens to events
type Listener interface {
Handle(msg *message.Message) error
@ -133,7 +133,7 @@ type Listener interface {
The `Handle` method is executed when the event this listener listens on is dispatched.
* As the single parameter, it gets the payload of the event, which is the event struct when it was dispatched decoded as json object and passed as a slice of bytes.
To use it you'll need to unmarshal it. Unfortunately there's no way to pass an already populated event object to the function because we would not know what type it has when parsing it.
* If the handler returns an error, the listener is retried 5 times, with an exponential back-off period in between retries.
If it still fails after the fifth retry, the event is nack'd and it's up to the event dispatcher to resend it.
You can learn more about this mechanism in the [watermill documentation](https://watermill.io/docs/middlewares/#retry).
@ -148,7 +148,7 @@ The easiest way to create a new listener for an event is with mage:
mage dev:make-listener <listener-name> <event-name> <package>
```
This will create a new listener type in the `pkg/<package>/listeners.go` file and implement the `Handle` and `Name` methods.
It will also pre-generate some boilerplate code to unmarshal the event from the payload.
Furthermore, it will register the listener for its event in the `RegisterListeners()` method of the same file.
@ -157,7 +157,7 @@ This function is called at startup and has to contain all events you want to lis
### Listening for Events
To listen for an event, you need to register the listener for the event it should be called for.
This usually happens in the `RegisterListeners()` method in `pkg/<package>/listeners.go` which is called at start up.
The listener will never be executed if it hasn't been registered.
@ -165,7 +165,7 @@ See the example below.
### Example
```go
// RegisterListeners registers all event listeners
func RegisterListeners() {
events.RegisterListener((&ListCreatedEvent{}).Name(), &IncreaseListCounter{})
@ -179,7 +179,7 @@ func (s *IncreaseTaskCounter) Name() string {
return "task.counter.increase"
}
// Handle is executed when the event IncreaseTaskCounter listens on is fired
func (s *IncreaseTaskCounter) Handle(payload message.Payload) (err error) {
return keyvalue.IncrBy(metrics.TaskCountKey, 1)
}
@ -190,22 +190,6 @@ func (s *IncreaseTaskCounter) Handle(payload message.Payload) (err error) {
When testing, you should call the `events.Fake()` method in the `TestMain` function of the package you want to test.
This prevents any events from being fired and lets you assert an event has been dispatched like so:
```go
events.AssertDispatched(t, &TaskCreatedEvent{})
```
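For context, a minimal sketch of the `TestMain` wiring this refers to could look like the following (imports and the rest of the setup are omitted, so treat the exact placement as an assumption):

```go
func TestMain(m *testing.M) {
	// Make sure no real events are dispatched while the tests run
	events.Fake()
	os.Exit(m.Run())
}
```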
### Testing a listener
You can call an event listener manually with the `events.TestListener` method like so:
```go
ev := &TaskCommentCreatedEvent{
Task: &task,
Doer: u,
Comment: tc,
}
events.TestListener(t, ev, &SendTaskCommentNotification{})
```
This will call the listener's `Handle` method and assert it did not return an error when calling.

View File

@ -11,9 +11,9 @@ menu:
# Mage
Vikunja uses [Mage](https://magefile.org/) to script common development tasks and even releasing.
Mage is a pure go solution which allows for greater flexibility and things like better parallelization.
This document explains what tasks are available and what they do.
{{< table_of_contents >}}
@ -31,7 +31,7 @@ go install github.com/magefile/mage
There are multiple categories of subcommands in the magefile:
* `build`: Contains commands to build a single binary
* `check`: Contains commands to statically check the source code
* `release`: Contains commands to release Vikunja with everything that's required
* `test`: Contains commands to run all kinds of tests
* `dev`: Contains commands to run development tasks
@ -41,26 +41,39 @@ There are multiple categories of subcommands in the magefile:
These tasks are automatically run in our CI every time someone pushes to main or you update a pull request:
* `mage lint`
* `mage check:lint`
* `mage check:fmt`
* `mage check:ineffassign`
* `mage check:misspell`
* `mage check:goconst`
* `mage build:generate`
* `mage build:build`
## Build
### Build Vikunja
```
mage build
```
{{< highlight bash >}}
mage build:build
{{< /highlight >}}
Builds a `vikunja`-binary in the root directory of the repo for the platform it is run on.
### Statically compile all templates into the binary
{{< highlight bash >}}
mage build:generate
{{< /highlight >}}
This generates static code with all templates, meaning no templates need to be referenced at runtime.
### clean
```
mage build:clean
```
Cleans all build and executable files
Cleans all build, executable and bindata files
## Check
@ -68,17 +81,24 @@ All check sub-commands exit with a status code of 1 if the check fails.
Various code-checks are available:
* `mage check:all`: Runs golangci and swagger documentation check
* `mage lint`: Checks if the code follows the rules as defined in the `.golangci.yml` config file.
* `mage lint:fix`: Fixes all code style issues which are easily fixable.
* `mage check:all`: Runs fmt-check, lint, got-swag, misspell-check, ineffassign-check, gocyclo-check, static-check, gosec-check, goconst-check all in parallel
* `mage check:fmt`: Checks if the code is properly formatted with go fmt
* `mage check:go-sec`: Checks the source code for potential security issues by scanning the Go AST using the [gosec tool](https://github.com/securego/gosec)
* `mage check:goconst`: Checks for repeated strings that could be replaced by a constant using [goconst](https://github.com/jgautheron/goconst/)
* `mage check:gocyclo`: Checks for the cyclomatic complexity of the source code using [gocyclo](https://github.com/fzipp/gocyclo)
* `mage check:got-swag`: Checks if the swagger docs need to be re-generated from the code annotations
* `mage check:ineffassign`: Checks the source code for ineffectual assigns using [ineffassign](https://github.com/gordonklaus/ineffassign)
* `mage check:lint`: Runs golint on all packages
* `mage check:misspell`: Checks the source code for misspellings
* `mage check:static`: Statically analyzes the source code about a range of different problems using [staticcheck](https://staticcheck.io/docs/)
## Release
### Build Releases
```
mage release
```
Builds binaries for all platforms and zips them with a copy of the `templates/` folder.
All built zip files are stored into `dist/zips/`. Binaries are stored in `dist/binaries/`,
@ -96,21 +116,21 @@ binary to be able to use it.
* `mage release:check` creates sha256 checksums for each binary which will be included in the zip file
* `mage release:os-package` bundles a binary with the `sha256` checksum file, a sample `config.yml` and a copy of the license in a folder for each architecture
* `mage release:compress` compresses all build binaries with `upx` to save space
* `mage release:zip` packages a zip file for the files created by `release:os-package`
### Build os packages
```
mage release:packages
```
Will build `.deb`, `.rpm` and `.apk` packages to `dist/os-packages`.
### Make a debian repo
```
mage release:reprepro
```
Takes an already built debian package and creates a debian repo structure around it.
@ -120,25 +140,25 @@ Used to be run inside a [docker container](https://git.kolaente.de/konrad/reprep
### unit
```
mage test:unit
```
Runs all tests except integration tests.
### coverage
```
mage test:coverage
```
Runs all tests except integration tests and generates a `coverage.html` file to inspect the code coverage.
### integration
```
mage test:integration
```
Runs all integration tests.
@ -146,29 +166,27 @@ Runs all integration tests.
### Create a new migration
```
mage dev:create-migration
```
Creates a new migration with the current date.
Will ask for the name of the struct you want to create a migration for.
See also [migration docs]({{< ref "mage.md" >}}).
## Misc
### Format the code
```
mage fmt
```
Formats all source code using `go fmt`.
### Generate swagger definitions from code comments
```
mage do-the-swag
```
Generates swagger definitions from the comment annotations in the code.

View File

@ -14,14 +14,7 @@ It is possible to migrate data from other to-do services to Vikunja.
To make this easier, we have put together a few helpers which are documented on this page.
In general, each migrator implements a migrator interface which is then called from a client.
The interface makes it possible to use helper methods which handle http and focus only on the implementation of the migrator itself.
There are two ways of migrating data from another service:
1. Through the auth-based flow where the user gives you access to their data at the third-party service through an oauth flow. You can then call the service's api on behalf of your user to get all the data. The Todoist, Trello and Microsoft To-Do Migrators use this pattern.
2. A file migration where the user uploads a file obtained from some third-party service. In your migrator, you need to parse the file and create the projects, tasks etc. The Vikunja File Import uses this pattern.
To differentiate the two, there are two different interfaces you must implement.
{{< table_of_contents >}}
@ -30,37 +23,23 @@ To differentiate the two, there are two different interfaces you must implement.
All migrator implementations live in their own package in `pkg/modules/migration/<name-of-the-service>`.
When creating a new migrator, you should place all related code inside that module.
## Migrator Interface
## Migrator interface
The migrator interface is defined as follows:
```go
// Migrator is the basic migrator interface which is shared among all migrators
type Migrator interface {
// Name holds the name of the migration.
// This is used to show the name to users and to keep track of users who already migrated.
Name() string
// Migrate is the interface used to migrate a user's tasks from another platform to Vikunja.
// The user object is the user who's tasks will be migrated.
Migrate(user *models.User) error
// AuthURL returns a url for clients to authenticate against.
// The use case for this are Oauth flows, where the server token should remain hidden and not
// known to the frontend.
AuthURL() string
}
```
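For orientation, a bare-bones skeleton of an implementation might look like the following. The service name, package and URL are made up, the signature follows the interface as shown above, and the real migrators contain the actual API calls and conversion logic:

```go
package myservice

import "code.vikunja.io/api/pkg/models"

// Migration implements the migrator interface for a hypothetical "myservice" importer.
type Migration struct{}

// Name identifies this migrator, e.g. in /info and when keeping track of users who already migrated.
func (m *Migration) Name() string {
	return "myservice"
}

// AuthURL returns the url the user is sent to in order to authorize the import.
func (m *Migration) AuthURL() string {
	return "https://myservice.example.com/oauth/authorize"
}

// Migrate fetches the user's data from the other service, converts it and inserts it into Vikunja.
func (m *Migration) Migrate(u *models.User) error {
	// fetch everything on behalf of u, convert it, then insert it,
	// for example via migration.InsertFromStructure (see below)
	return nil
}
```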
## File Migrator Interface
```go
// FileMigrator handles importing Vikunja data from a file. The implementation of it determines the format.
type FileMigrator interface {
// Name holds the name of the migration.
// This is used to show the name to users and to keep track of users who already migrated.
Name() string
// Migrate is the interface used to migrate a user's tasks, projects and other things from a file to Vikunja.
// The user object is the user who's tasks will be migrated.
Migrate(user *user.User, file io.ReaderAt, size int64) error
}
```
@ -75,57 +54,45 @@ authUrl, Status and Migrate methods.
```go
// This is an example for the Wunderlist migrator
if config.MigrationWunderlistEnable.GetBool() {
wunderlistMigrationHandler := &migrationHandler.MigrationWeb{
MigrationStruct: func() migration.Migrator {
return &wunderlist.Migration{}
},
}
wunderlistMigrationHandler.RegisterRoutes(m)
}
```
And for the file migrator:
```go
vikunjaFileMigrationHandler := &migrationHandler.FileMigratorWeb{
MigrationStruct: func() migration.FileMigrator {
return &vikunja_file.FileMigrator{}
},
}
vikunjaFileMigrationHandler.RegisterRoutes(m)
```
You should also document the routes with [swagger annotations]({{< ref "swagger-docs.md" >}}).
You should also document the routes with [swagger annotations]({{< ref "../practical-instructions/swagger-docs.md" >}}).
## Insertion helper method
There is a method available in the `migration` package which takes a fully nested Vikunja structure and creates it with all relations.
This means you start by adding a project, then add projects inside that project, then tasks in the lists and so on.
In general, it is recommended to have one root project with all projects of the other service as child projects.
This means you start by adding a namespace, then add lists inside of that namespace, then tasks in the lists and so on.
The root structure must be present as `[]*models.ProjectWithTasksAndBuckets`. It allows to represent all of Vikunja's hierarchy as a single data structure.
The root structure must be present as `[]*models.NamespaceWithLists`.
Then call the method like so:
```go
fullVikunjaHierarchy, err := convertWunderlistToVikunja(wContent)
if err != nil {
return
}
err = migration.InsertFromStructure(fullVikunjaHierarchy, user)
```
## Configuration
If your migrator is an oauth-based one, you should add at least an option to enable or disable it.
Chances are, you'll need some more options for things like client ID and secret (if the other service uses oAuth as an authentication flow).
You should add at least an option to enable or disable the migration.
Chances are, you'll need some more options for things like client ID and secret
(if the other service uses oAuth as an authentication flow).
The easiest way to implement an on/off switch is to check whether your migration service is enabled when registering the routes, and simply not register the routes if it is disabled.
File based migrators can always be enabled.
### Making the migrator public in `/info`
You should make your migrator available in the `/info` endpoint so that frontends can display options to enable them or not.
To do this, add an entry to the `AvailableMigrators` field in `pkg/routes/api/v1/info.go`.
To do this, add an entry to `pkg/routes/api/v1/info.go`.

View File

@ -10,7 +10,7 @@ menu:
# Notifications
Vikunja provides a simple abstraction to send notifications per mail and in the database.
{{< table_of_contents >}}
@ -18,7 +18,7 @@ Vikunja provides a simple abstraction to send notifications per mail and in the
Each notification has to implement this interface:
```go
type Notification interface {
ToMail() *Mail
ToDB() interface{}
@ -35,11 +35,11 @@ For example, if your notification should not be recorded in the database but onl
A list of chainable functions is available to compose a mail:
```go
mail := NewMail().
// The optional sender of the mail message.
From("test@example.com").
// The optional recipient of the mail message. Uses the mail address of the notifiable if omitted.
To("test@otherdomain.com").
// The subject of the mail to send.
Subject("Testmail").
@ -49,7 +49,7 @@ mail := NewMail().
Line("This is a line of text").
// An action can contain a title and a url. It gets rendered as a big button in the mail.
// Note that you can have only one action per mail.
// All lines added before an action will appear in the mail before the button, all lines
// added afterwards will appear after it.
Action("The Action", "https://example.com").
// Another line of text.
@ -60,7 +60,8 @@ If not provided, the `from` field of the mail contains the value configured in [
### Database notifications
All data returned from the `ToDB()` method is serialized to json and saved into the database, along with the id of the notifiable, the name of the notification and a time stamp.
If you don't use the database notification, the `Name()` function can return an empty string.
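Putting these pieces together, a hedged sketch of a complete notification could look like this, written as it would appear outside the `notifications` package. The struct, its fields and the mail contents are made up; only the chainable mail builder calls shown above are assumed:

```go
// FriendlyReminderNotification is a hypothetical notification used for illustration only.
type FriendlyReminderNotification struct {
	TaskTitle string
	TaskURL   string
}

// ToMail builds the mail representation of this notification.
func (n *FriendlyReminderNotification) ToMail() *notifications.Mail {
	return notifications.NewMail().
		Subject("Reminder: " + n.TaskTitle).
		Line("This task is due soon.").
		Action("Open the task", n.TaskURL)
}

// ToDB returns nil because this notification is only sent out per mail.
func (n *FriendlyReminderNotification) ToDB() interface{} {
	return nil
}

// Name would identify the notification in the database; it can stay empty since ToDB returns nil.
func (n *FriendlyReminderNotification) Name() string {
	return ""
}
```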
## Creating a new notification
@ -74,7 +75,7 @@ It takes the name of the notification and the package where the notification wil
Notifiables can receive a notification.
A notifiable is defined with this interface:
```go
type Notifiable interface {
// Should return the email address this notifiable has.
RouteForMail() string
@ -92,7 +93,7 @@ It takes a notifiable and a notification as input.
For example, the email confirm notification when a new user registers is sent like this:
```go
n := &EmailConfirmNotification{
User: update.User,
IsNew: false,
@ -107,13 +108,6 @@ return
The `mail` package provides a `Fake()` method which you should call in the `TestMain` function of your package.
If it was called, no mails are sent and you can instead assert they have been sent with the `AssertSent` method.
When testing, you should call the `notifications.Fake()` method in the `TestMain` function of the package you want to test.
This prevents any notifications from being sent and lets you assert a notifications has been sent like this:
```go
notifications.AssertSent(t, &ReminderDueNotification{})
```
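As with events, a minimal sketch of the corresponding `TestMain` wiring could look like this (imports omitted, exact placement is an assumption):

```go
func TestMain(m *testing.M) {
	// Make sure no real notifications are sent while the tests run
	notifications.Fake()
	os.Exit(m.Run())
}
```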
## Example
Take a look at the [pkg/user/notifications.go](https://code.vikunja.io/api/src/branch/main/pkg/user/notifications.go) file for a good example.

View File

@ -1,33 +0,0 @@
---
title: "Releasing a new Vikunja version"
date: 2022-10-28T13:06:05+02:00
draft: false
menu:
sidebar:
parent: "development"
---
# Releasing a new Vikunja version
This checklist is a collection of all steps usually involved when releasing a new version of Vikunja.
Not all steps are necessary for every release.
* Website update
* New Features: If there are new features worth mentioning the feature page should be updated.
* New Screenshots: If an overhaul of an existing feature happened so that it now looks different from the existing screenshot, a new one is required.
* Generate changelogs (with git-cliff)
* Tag a new version: Include the changelog for that version as the tag message
* Once built: Prune the cloudflare cache so that the new versions show up at [dl.vikunja.io](https://dl.vikunja.io/)
* Update the [Flathub desktop package](https://github.com/flathub/io.vikunja.Vikunja)
* Release Highlights Blogpost
* Include a section about Vikunja in general (totally fine to copy one from the earlier blog posts)
* New Features & Improvements: Mention bigger features, potentially with screenshots. Things like refactoring are sometimes also worth mentioning.
* Publish
* Reddit
* Twitter
* Mastodon
* Chat
* Newsletter
* Forum
* If features in the release were sponsored, send an email to relevant stakeholders
* Update Vikunja Cloud version and other instances

View File

@ -1,6 +1,6 @@
---
date: "2019-02-12:00:00+02:00"
title: "Project Structure"
title: "Project structure"
draft: false
type: "doc"
menu:
@ -10,7 +10,40 @@ menu:
# Project structure
This document explains what each package does.
In general, this api repo has the following structure:
* `docker`
* `docs`
* `pkg`
* `caldav`
* `cmd`
* `config`
* `db`
* `fixtures`
* `files`
* `integration`
* `log`
* `mail`
* `metrics`
* `migration`
* `models`
* `modules`
* `migration`
* `handler`
* `wunderlist`
* `red`
* `routes`
* `api/v1`
* `static`
* `swagger`
* `user`
* `utils`
* `version`
* `REST-Tests`
* `templates`
* `vendor`
This document will explain what these mean and what you can find where.
{{< table_of_contents >}}
@ -19,13 +52,18 @@ This document explains what each package does.
The root directory is where [the config file]({{< ref "../setup/config.md">}}), [Magefile]({{< ref "mage.md">}}), license, drone config,
application entry point (`main.go`) and so on are located.
## docker
This directory holds additional files needed to build and run the docker container, mainly service configuration to properly run Vikunja inside a docker
container.
## pkg
This is where most of the magic happens. Most packages with actual code are located in this folder.
### caldav
This folder holds a simple caldav implementation which is responsible for the caldav feature.
This folder holds a simple caldav implementation which is responsible for returning the caldav feature.
### cmd
@ -37,15 +75,10 @@ To learn more about how to use this cli, see [the cli usage docs]({{< ref "../us
### config
This package configures handling of Vikunja's runtime configuration.
It sets default values and sets up viper and tells it where to look for config files, how to interpret which env variables
for config etc.
This package configures the config. It sets default values and sets up viper and tells it where to look for config files,
how to interpret which env variables for config etc.
See also the [docs about adding a new configuration parameter]({{< ref "config.md" >}}).
### cron
See [how to add a cron task]({{< ref "cron.md" >}}).
If you want to add a new config parameter, you should add a default value in this package.
### db
@ -64,22 +97,22 @@ See [integration tests]({{< ref "test.md" >}}#integration-tests) for more detail
### log
Similar to `config`, this will set up the logging, based on different logging backends.
This init is called in `main.go` after the config init is done.
### mail
This package handles all mail sending. To learn how to send a mail, see [notifications]({{< ref "notifications.md" >}}).
This package handles all mail sending. To learn how to send a mail, see [sending emails]({{< ref "../practical-instructions/mail.md">}}).
### metrics
This package handles all metrics which are exposed to the prometheus endpoint.
To learn how it works and how to add new metrics, take a look at [how metrics work]({{< ref "metrics.md">}}).
To learn how it works and how to add new metrics, take a look at [how metrics work]({{< ref "../practical-instructions/metrics.md">}}).
### migration
This package handles all migrations.
All migrations are stored and executed in this package.
All migrations are stored and executed here.
To learn more, take a look at the [migrations docs]({{< ref "../development/db-migrations.md">}}).
@ -90,35 +123,11 @@ When adding new features or upgrading existing ones, that most likely happens he
Because this package is pretty huge, there are several documents and how-to's about it:
* [Adding a feature]({{< ref "feature.md">}})
* [Making calls to the database]({{< ref "database.md">}})
* [Adding a feature]({{< ref "../practical-instructions/feature.md">}})
* [Making calls to the database]({{< ref "../practical-instructions/database.md">}})
### modules
Everything that can have multiple implementations (like a task migrator from a third-party task provider) lives in a
respective sub package in this package.
#### auth
Contains openid related authentication.
#### avatar
Contains all possible avatar providers a user can choose to set their avatar.
#### background
All project background providers are in sub-packages of this package.
#### dump
Handles everything related to the `dump` and `restore` commands of Vikunja.
#### keyvalue
A simple key-value store with an implementation for memory and redis.
Can be used to cache values.
#### migration
See [writing a migrator]({{< ref "migration.md" >}}).
@ -126,29 +135,30 @@ See [writing a migrator]({{< ref "migration.md" >}}).
### red (redis)
This package initializes a connection to a redis server.
This initialization is automatically done at the startup of Vikunja.
It also has a function (`GetRedis()`) which returns a redis client object you can then use in your package
to talk to redis.
It uses the [go-redis](https://github.com/go-redis/redis) library, please see their configuration on how to use it.
**Note**: Only use this package directly if you have to use a direct redis connection.
In most cases, using the `keyvalue` package is a better fit.
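For illustration, a small sketch of going through the abstraction instead of redis directly; the key constant is the one used in the events listener example earlier in these docs and the error handling is only hinted at:

```go
// increase a counter through the keyvalue abstraction
if err := keyvalue.IncrBy(metrics.TaskCountKey, 1); err != nil {
	// handle the error, e.g. log it
}
```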
### routes
This package defines all routes which are available for Vikunja clients to use.
To add a new route, see [adding a new route]({{< ref "feature.md">}}).
This package defines all routes which are available for vikunja clients to use.
To add a new route, see [adding a new route]({{< ref "../practical-instructions/feature.md">}}).
#### api/v1
This is where all http-handler functions for the api are stored.
Every handler function which does not use the standard web handler should live here.
### static
All static files generated by `mage generate` live here.
### swagger
This is where the [generated]({{< ref "mage.md#generate-swagger-definitions-from-code-comments">}}) [api docs]({{< ref "../usage/api.md">}}) live.
You usually don't need to touch this package.
### user
@ -162,9 +172,30 @@ A small package, containing some helper functions:
* `MakeRandomString`: Generates a random string of a given length.
* `Sha256`: Calculates a sha256 hash from a given string.
See their function definitions for instructions on how to use them.
### version
The single purpose of this package is to hold the current Vikunja version which gets overridden through build flags each time `mage release` or `mage build` is run.
It is a separate package to avoid import cycles with other packages.
## REST-Tests
Holds all kinds of test files to directly test the api from inside of [JetBrains IDEs](https://www.jetbrains.com/help/idea/http-client-in-product-code-editor.html).
These files are currently more of an experiment; maybe we will drop them in the future in favour of something we can integrate into the testing process with drone.
Therefore, they make no claim to be complete or even working; you're free to change whatever is needed to get them working for you.
## templates
Holds the email templates used to send plain text and html emails for new user registration and password changes.
## vendor
All libraries needed to build Vikunja.
We keep all libraries used for Vikunja around in the `vendor/` folder to still be able to build the project even if
some maintainers take their libraries down like [it happened in the past](https://github.com/jteeuwen/go-bindata/issues/5).
When adding a new dependency, make sure to run `go mod vendor` to put it inside this directory.

View File

@ -1,96 +0,0 @@
---
date: "2019-02-12:00:00+02:00"
title: "Modifying Swagger API Docs"
draft: false
type: "doc"
menu:
sidebar:
parent: "development"
---
# Modifying swagger api docs
The api documentation is generated using [swaggo](https://github.com/swaggo/swag) from comments.
{{< table_of_contents >}}
## Documenting structs
You should always comment every field which will be exposed as a json in the api.
These comments will show up in the documentation, it'll make it easier for developers using the api.
As an example, this is the definition of a project with all comments:
```go
type Project struct {
// The unique, numeric id of this project.
ID int64 `xorm:"bigint autoincr not null unique pk" json:"id" param:"project"`
// The title of the project. You'll see this in the overview.
Title string `xorm:"varchar(250) not null" json:"title" valid:"required,runelength(1|250)" minLength:"1" maxLength:"250"`
// The description of the project.
Description string `xorm:"longtext null" json:"description"`
// The unique project short identifier. Used to build task identifiers.
Identifier string `xorm:"varchar(10) null" json:"identifier" valid:"runelength(0|10)" minLength:"0" maxLength:"10"`
// The hex color of this project
HexColor string `xorm:"varchar(6) null" json:"hex_color" valid:"runelength(0|6)" maxLength:"6"`
OwnerID int64 `xorm:"bigint INDEX not null" json:"-"`
ParentProjectID int64 `xorm:"bigint INDEX null" json:"parent_project_id"`
ParentProject *Project `xorm:"-" json:"-"`
// The user who created this project.
Owner *user.User `xorm:"-" json:"owner" valid:"-"`
// Whether a project is archived.
IsArchived bool `xorm:"not null default false" json:"is_archived" query:"is_archived"`
// The id of the file this project has set as background
BackgroundFileID int64 `xorm:"null" json:"-"`
// Holds extra information about the background set since some background providers require attribution or similar. If not null, the background can be accessed at /projects/{projectID}/background
BackgroundInformation interface{} `xorm:"-" json:"background_information"`
// Contains a very small version of the project background to use as a blurry preview until the actual background is loaded. Check out https://blurha.sh/ to learn how it works.
BackgroundBlurHash string `xorm:"varchar(50) null" json:"background_blur_hash"`
// True if a project is a favorite. Favorite projects show up in a separate parent project. This value depends on the user making the call to the api.
IsFavorite bool `xorm:"-" json:"is_favorite"`
// The subscription status for the user reading this project. You can only read this property, use the subscription endpoints to modify it.
// Will only returned when retrieving one project.
Subscription *Subscription `xorm:"-" json:"subscription,omitempty"`
// The position this project has when querying all projects. See the tasks.position property on how to use this.
Position float64 `xorm:"double null" json:"position"`
// A timestamp when this project was created. You cannot change this value.
Created time.Time `xorm:"created not null" json:"created"`
// A timestamp when this project was last updated. You cannot change this value.
Updated time.Time `xorm:"updated not null" json:"updated"`
web.CRUDable `xorm:"-" json:"-"`
web.Rights `xorm:"-" json:"-"`
}
```
## Documenting api Endpoints
All api routes should be documented with a comment above the handler function.
When generating the api docs with mage, the swagger cli will pick these up and put them in a neat document.
A comment looks like this:
```go
// @Summary Login
// @Description Logs a user in. Returns a JWT-Token to authenticate further requests.
// @tags user
// @Accept json
// @Produce json
// @Param credentials body user.Login true "The login credentials"
// @Success 200 {object} auth.Token
// @Failure 400 {object} models.Message "Invalid user password model."
// @Failure 412 {object} models.Message "Invalid totp passcode."
// @Failure 403 {object} models.Message "Invalid username or password."
// @Router /login [post]
func Login(c echo.Context) error {
// Handler logic
}
```

View File

@ -10,31 +10,32 @@ menu:
# Testing
You can run unit tests with [our `Magefile`]({{< ref "mage.md">}}) with
{{< highlight bash >}}
mage test:unit
{{< /highlight >}}
{{< table_of_contents >}}
## API Tests
## Running tests with config
The following parts are about the kinds of tests in the API package and how to run them.
You can run tests with all available config variables if you want, enabling you to run tests for a lot of scenarios.
### Prerequisites
To use the normal config set the environment variable `VIKUNJA_TESTS_USE_CONFIG=1`.
To run any kind of test, you need to specify Vikunja's [root path](https://vikunja.io/docs/config-options/#rootpath).
This is required to make sure all test fixtures are correctly loaded.
## Show sql queries
The easiest way to do that is to set the environment variable `VIKUNJA_SERVICE_ROOTPATH` to the path where you cloned the repository.
When `UNIT_TESTS_VERBOSE=1` is set, all sql queries will be shown when tests are run.
### Unit tests
## Fixtures
To run unit tests with [mage]({{< ref "mage.md">}}), execute
All tests are run against a set of db fixtures.
These fixtures are defined in `pkg/models/fixtures` in YAML-Files which represent the database structure.
```
mage test:unit
```
When you add a new test case which requires new database entries to test against, update these files.
In Vikunja, everything that is not an integration test counts as unit test - even if it accesses the db.
This definition is a bit blurry, but we haven't found a better one yet.
### Integration tests
## Integration tests
All integration tests live in `pkg/integrations`.
You can run them by executing `mage test:integration`.
@ -44,25 +45,7 @@ see at the beginning of this document.
To run integration tests, use `mage test:integration`.
### Running tests with config
You can run tests with all available config variables if you want, enabling you to run tests for a lot of scenarios.
We use this in CI to run all tests with different databases.
To use the normal config set the environment variable `VIKUNJA_TESTS_USE_CONFIG=1`.
### Showing sql queries
When the environment variable `UNIT_TESTS_VERBOSE=1` is set, all sql queries will be shown during the test run.
### Fixtures
All tests are run against a set of db fixtures.
These fixtures are defined in `pkg/models/fixtures` in YAML-Files which represent the database structure.
When you add a new test case which requires new database entries to test against, update these files.
#### Initializing db fixtures when writing tests
## Initializing db fixtures when writing tests
All db fixtures for all tests live in the `pkg/db/fixtures/` folder as yaml files.
Each file has the same name as the table the fixtures are for.
@ -87,23 +70,3 @@ db.LoadAndAssertFixtures(t)
This will load all fixtures you defined in your test init method.
You should always use this method to load fixtures, the only exception is when your package tests require extra test
fixtures other than db fixtures (like files).
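A minimal sketch of a test using this helper; the test name and the omitted assertions are made up, only `db.LoadAndAssertFixtures` is taken from the docs above:

```go
func TestSomethingWithFixtures(t *testing.T) {
	// Load all db fixtures defined for this package and assert they are in place
	db.LoadAndAssertFixtures(t)

	// ... run the actual test logic against the fixture data here
}
```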
## Frontend tests
The frontend has end to end tests with Cypress that use a Vikunja instance and drive a browser against it.
Check out the docs [in the frontend repo](https://kolaente.dev/vikunja/vikunja/src/branch/main/frontend/cypress/README.md) about how they work and how to get them running.
### Unit Tests
To run the frontend unit tests, run
```
pnpm run test:unit
```
The frontend also has a watcher available that re-runs all unit tests every time you change something.
To use it, simply run
```
pnpm run test:unit-watch
```

View File

@ -25,51 +25,29 @@ Genauer definiert:
* “falsch” instead of “nicht korrekt/inkorrekt”
* “Wende dich an …” instead of “kontaktiere …”
* Translate in the same tense (otherwise the English “is” becomes the German “war”)
* Use proper quotation marks, i.e. `„“` instead of `''` or `'` or ` or ´
* `„` for opening quotation marks, `“` for closing quotation marks
Articles and hyphenation follow the [Duden](https://duden.de).
## Wording
* `Account` instead of `Konto`.
* `TOTP` always as one word and capitalized.
* `CalDAV` always written exactly like this.
* `löschen` or `entfernen` depending on context. When something is *gelöscht* (deleted), the deleted object no longer exists afterwards and may have deleted other objects along with it (e.g. a task). When something is *entfernt* (removed), this usually refers to its relation to another object. The removed object still exists afterwards, e.g. when removing a user from a team.
* Analogous to `löschen` and `entfernen`, the same applies to `hinzufügen` and `erstellen`. A task is *erstellt* (created), but a user is only *hinzugefügt* (added) to a team.
* `Anmeldename` instead of `Benutzer:innenname`
## Wording in modals and buttons
The same wording should be used on buttons and in the corresponding modals.
Example: If the button is labelled `löschen`, the question in the modal should be `Willst du das wirklich löschen?` and not `Willst du das wirklich entfernen?`.
The same applies to success/error messages after the action.
## Gender-neutral language
Where possible, a gender-neutral form of address should be used.
If that would become very cumbersome (see “Amtsdeutsch-Umschreibungen” above), gender with a *colon*.
Example: „Benutzer:in“
## Hyphenation
* E-Mail-Adresse (see Duden)
## Words and expressions
| English original | Use in the German translation |
| ------------------- | -------------------- |
| Bucket | Spalte |
| Link Share | Linkfreigabe |
| Username | Anmeldename |
## Further links
* https://docs.translatehouse.org/projects/localization-guide/en/latest/guide/translation_guidelines_german.html

View File

@ -15,13 +15,13 @@ This document provides documentation about how to translate Vikunja.
## Where to translate
Translation happens at [crowdin](https://crowdin.com/project/vikunja).
Translation happens at [weblate](https://hosted.weblate.org/projects/vikunja/frontend/).
Currently, only the frontend (and by extension, the desktop app) is translatable.
## Translation Instructions
> These are the instructions for translating Vikunja in another language.
> For information about how to add new translation strings, see below.
For all languages these translation guidelines should be applied when translating:
@ -44,11 +44,11 @@ Instead, translate it to reflect the original meaning in the translated string b
All translation strings are stored in `src/i18n/lang/`.
New strings should be added only in the `en.json` file.
Strings in other languages will be synced through [crowdin](https://crowdin.com/project/vikunja) and should not be added directly as a PR/commit in the frontend repo.
Strings in other languages will be synced through weblate and should not be added directly as a PR/commit in the frontend repo.
## Requesting a new language
If you want to start translating Vikunja in a language not yet available in Vikunja, please request the language through the crowdin interface.
If you have issues with this or need a discussion before doing so, please [contact us](https://vikunja.io/contact/) or [start a discussion in the forum](https://community.vikunja.io).
If you want to start translating Vikunja in a language not yet available in Vikunja, please request the language through the weblate interface.
If you have issues with this or need a discussion before doing so, please [contact us](https://vikunja.io/contact/) or [start a discussion in the forum](https://community.vikunja.io).
Once at least 50% of all translation strings are translated and approved, they will be added and distributed with the Vikunja frontend for users to select and use Vikunja with them.

View File

@ -0,0 +1,40 @@
---
date: "2019-02-12:00:00+02:00"
title: "Database"
draft: false
type: "doc"
menu:
sidebar:
parent: "practical instructions"
---
# Database
Vikunja uses [xorm](http://xorm.io/) as an abstraction layer to handle the database connection.
Please refer to [their](http://xorm.io/docs/) documentation on how to exactly use it.
Inside the `models` package, a variable `x` is available which contains a pointer to an instance of `xorm.Engine`.
This is used whenever you make a call to the database to get or update data.
This xorm instance is set up and initialized every time vikunja is started.
{{< table_of_contents >}}
## Adding new database tables
To add a new table to the database, add an instance of your struct to the `tables` variable in the
init function in `pkg/models/models.go`. Xorm will sync them automatically.
You also need to add a pointer to the `tablesWithPointer` slice to enable caching for all instances of this struct.
To learn more about how to configure your struct to create "good" tables, refer to [the xorm documentation](http://xorm.io/docs/).
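As a rough sketch (the `Reminder` struct is made up for illustration; the actual slices live in `pkg/models/models.go`):
{{< highlight golang >}}
func init() {
	tables = append(tables,
		// ... existing tables ...
		new(Reminder), // sketch: register the new struct so xorm syncs its table
	)

	tablesWithPointer = append(tablesWithPointer,
		// ... existing pointers ...
		&Reminder{}, // sketch: enables caching for this struct
	)
}
{{< /highlight >}}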
## Adding data to test fixtures
Adding data for test fixtures is done via `yaml` files inside of `pkg/models/fixtures`.
The name of the yaml file should equal the table name in the database.
Adding values to it is done via array definition inside of the yaml file.
**Note**: Table and column names need to be in snake_case as that's what is used internally in the database
and for mapping values from the database to xorm so your structs can use it.
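For illustration, a hypothetical `teams.yml` fixture with two rows could look like this (the column names are made up; use the actual snake_case column names of your table):
{{< highlight yaml >}}
- id: 1
  name: testteam1
  created_by_id: 1
- id: 2
  name: testteam2
  created_by_id: 1
{{< /highlight >}}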

View File

@ -5,7 +5,7 @@ draft: false
type: "doc"
menu:
sidebar:
parent: "development"
parent: "practical instructions"
---
# Custom Errors
@ -13,14 +13,14 @@ menu:
All custom errors are defined in `pkg/models/errors.go`.
You should add new ones in this file.
Custom errors usually have fields for the http return code, a [Vikunja-specific error code]({{< ref "../usage/errors.md">}})
Custom errors usually have fields for the http return code, a [vikunja-specific error code]({{< ref "../usage/errors.md">}})
and a human-readable error message about what went wrong.
An error consists of multiple functions and definitions:
```go
{{< highlight golang >}}
// This struct holds any information about this specific error.
// In this case, it contains the user ID of a nonexistent user.
// In this case, it contains the user ID of a nonexistent user.
// This type should always be a struct, even if it has no values in it.
// ErrUserDoesNotExist represents a "UserDoesNotExist" kind of error.
@ -44,21 +44,21 @@ func (err ErrUserDoesNotExist) Error() string {
return fmt.Sprintf("User does not exist [user id: %d]", err.UserID)
}
// This const holds the Vikunja error code used to be able to identify this error without having to
// This const holds the vikunja error code used to be able to identify this error without having to
// rely on an error string.
// This needs to be unique, so you should check whether the error code exists or not.
// The general convention for error codes is as follows:
// * Every "group" errors lives in a thousend something. For example all user issues are 1000-something, all
// project errors are 3000-something and so on.
// list errors are 3000-something and so on.
// * New error codes should be the current max error code + 1. Don't take free numbers to prevent old errors
// which are deprecated and removed from being "new ones". For example, if there are error codes 1001, 1002, 1004,
// which are deprecated and removed from being "new ones". For example, if there are error codes 1001, 1002, 1004,
// a new error should be 1005 and not 1003.
// ErrCodeUserDoesNotExist holds the unique world-error code of this error
const ErrCodeUserDoesNotExist = 1005
// This is the implementation which returns an http error which is then passed to the client.
// Here you define the http status code with which one the error will be returned, the Vikunja error code and
// Here you define the http status code with which one the error will be returned, the vikunja error code and
// a human-readable error message.
// HTTPError holds the http error description
@ -69,4 +69,4 @@ func (err ErrUserDoesNotExist) HTTPError() web.HTTPError {
Message: "The user does not exist.",
}
}
```
{{< /highlight >}}

View File

@ -1,11 +1,11 @@
---
date: "2019-02-12:00:00+02:00"
title: "New API Endpoints"
title: "Add a new api endpoint"
draft: false
type: "doc"
menu:
sidebar:
parent: "development"
parent: "practical instructions"
---
# Add a new api endpoint/feature
@ -25,9 +25,9 @@ It returns the `limit` (max-length) and `offset` parameters needed for SQL-Queri
You can feed this function directly into xorm's `Limit`-Function like so:
```go
projects := []*Project{}
err := x.Limit(getLimitFromPageIndex(pageIndex, itemsPerPage)).Find(&projects)
```
{{< highlight golang >}}
lists := []List{}
err := x.Limit(getLimitFromPageIndex(pageIndex, itemsPerPage)).Find(&lists)
{{< /highlight >}}
// TODO: Add a full example from start to finish, like a tutorial on how to create a new endpoint?

View File

@ -0,0 +1,86 @@
---
date: "2019-02-12:00:00+02:00"
title: "Mailer"
draft: false
type: "doc"
menu:
sidebar:
parent: "practical instructions"
---
# Mailer
This document explains how to use the mailer to send emails and what to do to create a new kind of email to be sent.
{{< table_of_contents >}}
## Sending emails
**Note:** You should use mail templates whenever possible (see below).
To send an email, use the function `mail.SendMail(options)`. The options are defined as follows:
{{< highlight golang >}}
type Opts struct {
To string // The email address of the recipient
Subject string // The subject of the mail
Message string // The plaintext message in the mail
HTMLMessage string // The html message
ContentType ContentType // The content type of the mail. Can be either mail.ContentTypePlain, mail.ContentTypeHTML, mail.ContentTypeMultipart. You should set this according to the kind of mail you want to send.
Boundary string
Headers []*header // Other headers to set in the mail.
}
{{< /highlight >}}
### Sending emails based on a template
For each mail with a template, there are two email templates: One for plaintext emails, one for html emails.
These are located in the `templates/mail` folder and follow the conventions of `template-name.{plain|html}.tmpl`,
both the plaintext and html templates are in the same folder.
To send a mail based on a template, use the function `mail.SendMailWithTemplate(to, subject, tpl string, data map[string]interface{})`.
`to` and `subject` are pretty much self-explanatory, `tpl` is the name of the template, without `.html.tmpl` or `.plain.tmpl`.
`data` is a map you can use to pass additional data to your template.
### Sending a mail with a template
A basic html email template would look like this:
{{< highlight go-html-template >}}
{{template "mail-header.tmpl" .}}
<p>
Hey there!<br/>
This is a minimal html email example.<br/>
{{.Something}}
</p>
{{template "mail-footer.tmpl"}}
{{< /highlight >}}
And the corresponding plaintext template:
{{< highlight go-text-template >}}
Hey there!
This is a minimal html email example.
{{.Something}}
{{< /highlight >}}
You would then call this like so:
{{< highlight golang >}}
data := make(map[string]interface{})
data["Something"] = "I am some computed value"
to := "test@example.com"
subject := "A simple test mail"
tpl := "demo" // Assuming you saved the templates as demo.plain.tmpl and demo.html.tmpl
mail.SendMailWithTemplate(to, subject, tpl, data)
{{< /highlight >}}
The function does not return an error. If an error occurs when sending a mail, it is logged but not returned because sending the mail happens asynchronously.
Notice the `mail-header.tmpl` and `mail-footer.tmpl` in the template. These populate some basic css, a box for your content and the vikunja logo.
All that's left for you is to put the content in, which then will appear in a beautifully-styled box.
Remember, these are email templates. This is different from normal html/css: you cannot use whatever you want (because most of the clients are way too outdated).

View File

@ -5,7 +5,7 @@ draft: false
type: "doc"
menu:
sidebar:
parent: "development"
parent: "practical instructions"
---
# Metrics
@ -17,13 +17,13 @@ The `metrics` package provides several functions to create and update metrics.
{{< table_of_contents >}}
## Exposing New Metrics
## New metrics
First, define a `const` with the metric key in redis. This is done in `pkg/metrics/metrics.go`.
To expose a new metric, you need to register it in the `init` function inside of the `metrics` package like so:
```go
{{< highlight golang >}}
// Register total user count metric
promauto.NewGaugeFunc(prometheus.GaugeOpts{
Name: "vikunja_team_count", // The key of the metric. Must be unique.
@ -32,15 +32,16 @@ promauto.NewGaugeFunc(prometheus.GaugeOpts{
count, _ := GetCount(TeamCountKey) // TeamCountKey is the const we defined earlier.
return float64(count)
})
```
{{< /highlight >}}
Then you'll need to set the metrics initial value on every startup of Vikunja.
Then you'll need to set the metrics initial value on every startup of vikunja.
This is done in `pkg/routes/routes.go` to avoid cyclic imports.
If metrics are enabled, it checks if a redis connection is available and then sets the initial values.
A convenience function is available if the metric is based on a database struct.
Because metrics are stored in redis, you are responsible to increase or decrease these based on criteria you define.
To do this, use `metrics.UpdateCount(value, key)` where `value` is the amount you want to change it (you can pass negative values to decrease it) and `key` is the redis key used to define the metric.
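For example, a sketch using the `TeamCountKey` const from above to keep the team count in sync:
{{< highlight golang >}}
// A new team was created, so increase the stored count by one.
metrics.UpdateCount(1, metrics.TeamCountKey)

// A team was deleted, so decrease it again.
metrics.UpdateCount(-1, metrics.TeamCountKey)
{{< /highlight >}}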
## Using it

View File

@ -0,0 +1,31 @@
---
date: "2019-02-12:00:00+02:00"
title: "Adding new config options"
draft: false
type: "doc"
menu:
sidebar:
parent: "practical instructions"
---
# Adding new config options
Vikunja uses [viper](https://github.com/spf13/viper) to handle configuration options.
It handles parsing all different configuration sources.
The configuration is done in sections. These are represented with a `.` in viper.
Take a look at `pkg/config/config.go` to understand how these are set.
To add a new config option, you should add a default value to `pkg/config/config.go`.
Default values should always enable the feature to work somehow, or turn it off completely if it always needs
additional configuration.
Make sure to add the new config option to [the config document]({{< ref "../setup/config.md">}}) and the default config file
(`config.yml.sample` at the root of the repository) to make sure it is well documented.
If you're using a computed value as a default, make sure to update the sample config file and debian
post-install scripts to reflect that.
To get a configured option, use `viper.Get("config.option")`.
Take a look at [viper's documentation](https://github.com/spf13/viper#getting-values-from-viper) to learn of the
different ways available to get config options.
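As a rough sketch with a made-up option name (the real defaults live in `pkg/config/config.go`):
{{< highlight golang >}}
// Define a default for a new option in a new "mysection" section ...
viper.SetDefault("mysection.mynewoption", false)

// ... and read it wherever it is needed.
if viper.GetBool("mysection.mynewoption") {
	// feature is enabled
}
{{< /highlight >}}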

View File

@ -0,0 +1,47 @@
---
date: "2019-02-12:00:00+02:00"
title: "Modifying swagger api docs"
draft: false
type: "doc"
menu:
sidebar:
parent: "practical instructions"
---
# Adding/editing swagger api docs
The api documentation is generated using [swaggo](https://github.com/swaggo/swag) from comments.
## Documenting structs
You should always comment every field which will be exposed as json in the api.
These comments will show up in the documentation and make it easier for developers using the api.
As an example, this is the definition of a list with all comments:
{{< highlight golang >}}
// List represents a list of tasks
type List struct {
// The unique, numeric id of this list.
ID int64 `xorm:"bigint autoincr not null unique pk" json:"id" param:"list"`
// The title of the list. You'll see this in the namespace overview.
Title string `xorm:"varchar(250)" json:"title" valid:"required,runelength(3|250)" minLength:"3" maxLength:"250"`
// The description of the list.
Description string `xorm:"varchar(1000)" json:"description" valid:"runelength(0|1000)" maxLength:"1000"`
OwnerID int64 `xorm:"bigint INDEX" json:"-"`
NamespaceID int64 `xorm:"bigint INDEX" json:"-" param:"namespace"`
// The user who created this list.
Owner User `xorm:"-" json:"owner" valid:"-"`
// An array of tasks which belong to the list.
Tasks []*ListTask `xorm:"-" json:"tasks"`
// A unix timestamp when this list was created. You cannot change this value.
Created int64 `xorm:"created" json:"created"`
// A unix timestamp when this list was last updated. You cannot change this value.
Updated int64 `xorm:"updated" json:"updated"`
web.CRUDable `xorm:"-" json:"-"`
web.Rights `xorm:"-" json:"-"`
}
{{< /highlight >}}

View File

@ -10,50 +10,45 @@ menu:
# What to backup
There are two parts you need to back up: The database and attachment files.
Vikunja does not store any data outside of the database.
So, all you need to back up are the contents of that database and maybe the config file.
{{< table_of_contents >}}
## Files
To back up attachments and other files, it is enough to copy them [from the attachments folder]({{< ref "config.md" >}}#basepath) to some other place.
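For example, assuming the default `files` folder next to the Vikunja binary (adjust both paths to your setup):
```
cp -r /opt/vikunja/files /path/to/backup/vikunja-files
```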
## Database
### MySQL
## MySQL
To create a backup from mysql use the `mysqldump` command:
```
{{< highlight bash >}}
mysqldump -u <user> -p -h <db-host> <database> > vikunja-backup.sql
```
{{< /highlight >}}
You will be prompted for the password of the mysql user.
To restore it, simply pipe it back into the `mysql` command:
```
{{< highlight bash >}}
mysql -u <user> -p -h <db-host> <database> < vikunja-backup.sql
```
{{< /highlight >}}
### PostgreSQL
## PostgreSQL
To create a backup from PostgreSQL use the `pg_dump` command:
```
{{< highlight bash >}}
pg_dump -U <user> -h <db-host> <database> > vikunja-backup.sql
```
{{< /highlight >}}
You might be prompted for the password of the database user.
To restore it, simply pipe it back into the `psql` command:
```
{{< highlight bash >}}
psql -U <user> -h <db-host> <database> < vikunja-backup.sql
```
{{< /highlight >}}
For more information, please visit the [relevant PostgreSQL documentation](https://www.postgresql.org/docs/12/backup-dump.html).
### SQLite
## SQLite
To back up sqlite databases, it is enough to copy the [database file]({{< ref "config.md" >}}#path) to somewhere else.
To back up sqlite databases, it is enough to copy the database elsewhere.
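For example, assuming the database file lives at `/opt/vikunja/vikunja.db` (adjust the paths to your setup and make sure Vikunja is stopped or idle to get a consistent copy):
```
cp /opt/vikunja/vikunja.db /path/to/backup/vikunja-$(date +%F).db
```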

View File

@ -1,5 +1,5 @@
---
date: "2022-09-21:00:00+02:00"
date: "2019-02-12:00:00+02:00"
title: "Build from sources"
draft: false
type: "doc"
@ -10,37 +10,20 @@ menu:
# Build Vikunja from source
To fully build Vikunja from source files, you need to build the api and frontend.
Vikunja, being a Go application, has no other dependencies than Go itself.
All libraries are bundled inside the repo in the `vendor/` folder, so all it boils down to are these steps:
{{< table_of_contents >}}
1. Make sure [Go](https://golang.org/doc/install) is properly installed on your system. You'll need at least Go `1.9`.
2. Make sure [Mage](https://magefile.org) is properly installed on your system.
3. Clone the repo with `git clone https://code.vikunja.io/api`
4. Run `mage build:build` in the source of this repo. This will build a binary in the root of the repo which will be able to run on your system.
## General Preparations
*Note:* Static resources such as email templates are built into the binary.
For these to work, you may need to run `mage build:generate` before building the vikunja binary.
When building entirely with `mage`, you don't need to do this: `mage build:generate` will be run automatically when running `mage build:build`.
1. Make sure you have git installed
2. Clone the repo with `git clone https://code.vikunja.io/vikunja` and switch into the directory.
3. Check out the version you want to build with `git checkout VERSION` - replace `VERSION` with the version you want to use. If you don't do this, you'll build the [latest unstable build]({{< ref "versions.md">}}), which might contain bugs.
# Build for different architectures
## Frontend
To build for other platforms and architectures than the one you're currently on, simply run `mage release:release` or `mage release:{linux|windows|darwin}`.
The code for the frontend is located in the `frontend/` sub folder of the main repo.
1. Make sure you have [pnpm](https://pnpm.io/installation) properly installed on your system.
2. Install all dependencies with `pnpm install`
3. Build the frontend with `pnpm run build`. This will result in a static js bundle in the `dist/` folder.
4. You can either deploy that static js bundle directly, or read on to learn how to bundle it all up in a static binary with the api.
## API
The Vikunja API has no other dependencies than go itself.
That means compiling it boils down to these steps:
1. Make sure [Go](https://golang.org/doc/install) is properly installed on your system. You'll need at least Go `1.21`.
2. Make sure [Mage](https://magefile.org) is properly installed on your system.
3. If you did not build the frontend in the steps before, you need to either do that or create a dummy index file with `mkdir -p frontend/dist && touch frontend/dist/index.html`.
4. Run `mage build` in the source of the main repo. This will build a binary in the root of the repo which will be able to run on your system.
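Putting the steps above together, a full build could look roughly like this (a sketch; replace `VERSION` with the release you want to build):
```
git clone https://code.vikunja.io/vikunja
cd vikunja
git checkout VERSION

# Frontend
cd frontend
pnpm install
pnpm run build
cd ..

# API, bundling the frontend build from frontend/dist
mage build
```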
### Build for different architectures
To build for other platforms and architectures than the one you're currently on, simply run `mage release` or `mage release:{linux|windows|darwin}`.
More options are available, please refer to the [magefile docs]({{< ref "../development/mage.md">}}) for more details.

File diff suppressed because it is too large

View File

@ -1,30 +0,0 @@
---
title: "Desktop Packages"
date: 2024-02-11T15:58:18+01:00
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Desktop Packages
Vikunja is available as an electron-based desktop application for Linux and Windows.
## Installation
1. Download the latest release for your platform from [the download page](https://dl.vikunja.io/desktop/).
* For Windows, choose the file with the `.exe` or `.msi` file ending
* For a Linux-based operating system, choose a file with an ending for your operating system - we have builds for Alpine, AppImage, Arch Linux, Debian-based systems, FreeBSD, Fedora and Snap.
2. Run the downloaded package in the same way you would normally install a package for your OS.
## Flatpak
Vikunja Desktop can be installed from [Flathub](https://flathub.org/apps/io.vikunja.Vikunja).
To install it, run the following command:
```
flatpak install flathub io.vikunja.Vikunja
```

View File

@ -27,69 +27,87 @@ Create a directory for the project where all data and the compose file will live
Create a `docker-compose.yml` file with the following contents in your directory:
```yaml
{{< highlight yaml >}}
version: '3'
services:
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_PUBLICURL: http://<the public url where vikunja is reachable>
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: changeme
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_SERVICE_JWTSECRET: <a super secure random secret>
ports:
- 3456:3456
volumes:
- ./files:/app/vikunja/files
depends_on:
db:
condition: service_healthy
restart: unless-stopped
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: changeme
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h localhost -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"]
interval: 2s
start_period: 30s
```
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: secret
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
api:
image: vikunja/api
environment:
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: secret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
volumes:
- ./files:/app/vikunja/files
depends_on:
- db
restart: unless-stopped
frontend:
image: vikunja/frontend
restart: unless-stopped
proxy:
image: nginx
ports:
- 80:80
volumes:
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
depends_on:
- api
- frontend
restart: unless-stopped
{{< /highlight >}}
This defines two services, each with their own container:
* A Vikunja service which runs the vikunja api and hosts its frontend.
* A database container which will store all projects, tasks, etc. We're using mariadb here, but you're free to use mysql or postgres if you want.
If you already have a proxy on your host, you may want to check out the [reverse proxy examples]({{< ref "reverse-proxies.md" >}}) to use that.
By default, Vikunja will be exposed on port 3456 on the host.
This defines four services, each with their own container:
* An api service which runs the vikunja api. Most of the core logic lives here.
* The frontend which will make vikunja actually usable for most people.
* A database container which will store all lists, tasks, etc. We're using mariadb here, but you're free to use mysql or postgres if you want.
* A proxy service which makes the frontend and api available on the same port, redirecting all requests to `/api` to the api container.
If you already have a proxy on your host, you may want to check out the [reverse proxy examples]({{< ref "reverse-proxies.md" >}}) to use that.
By default, it uses port 80 on the host.
To change to something different, you'll need to change the `ports` section in the service definition.
The number before the colon is the host port - This is where you can reach vikunja from the outside once all is up and running.
You'll need to change the value of the `VIKUNJA_SERVICE_PUBLICURL` environment variable to the public port or hostname where Vikunja is reachable.
For the proxy service we'll need another bit of configuration.
Create an `nginx.conf` in your directory (next to the `docker-compose.yml` file) and add the following contents to it:
## Ensure adequate file permissions
{{< highlight conf >}}
server {
listen 80;
Vikunja runs as user `1000` and no group by default.
location / {
proxy_pass http://frontend:80;
}
To be able to upload task attachments or change the background of a project, Vikunja must be able to write into the `files` directory.
To do this, create the folder and chown it before starting the stack:
location ~* ^/(api|dav|\.well-known)/ {
proxy_pass http://api:3456;
client_max_body_size 20M;
}
}
{{< /highlight >}}
```
mkdir $PWD/files
chown 1000 $PWD/files
```
This is a simple proxy configuration which will forward all requests to `/api/` to the api container and everything else to the frontend.
<div class="notification is-info">
<b>NOTE:</b> Even if you want to make your installation available under a different port, you don't need to change anything in this configuration.
</div>
<div class="notification is-warning">
<b>NOTE:</b> If you change the max upload size in Vikunja's settings, you'll need to also change the <code>client_max_body_size</code> in the nginx proxy config.
</div>
## Run it
@ -98,8 +116,8 @@ When first started, Vikunja will set up the database and run all migrations etc.
Once it is ready, you should see a message like this one in your console:
```
vikunja_1 | 2024-02-09T14:44:06.990677157+01:00: INFO ▶ cmd/func29 05d Vikunja version 0.23.0
vikunja_1 | ⇨ http server started on [::]:3456
api_1 | 2020-05-24T11:15:37.560386009Z: INFO ▶ cmd/func1 025 Vikunja version 0.13.1+19-e9bc3246ce, built at Sun, 24 May 2020 11:10:36 +0000
api_1 | ⇨ http server started on [::]:3456
```
This indicates all setup has been successful.
@ -139,6 +157,20 @@ If not, there might be a different error or a bug with Vikunja, please reach out
(If you have an idea about how we could improve this, we'd like to hear it!)
#### "Not a directory"
If you get an error like this one:
```
ERROR: for vikunja_proxy_1 Cannot start service proxy: OCI runtime create failed: container_linux.go:349: starting container process caused "process_linux.go:449: container init caused \"rootfs_linux.go:58: mounting \\\"vikunja/nginx.conf\\\" to rootfs \\\"/var/lib/docker/overlay2/9c8b8f9419c29dad0d1233fbb0a3c36cf403dabd7a55d6f0a47b0c1dd6029994/merged\\\" at \\\"/var/lib/docker/overlay2/9c8b8f9419c29dad0d1233fbb0a3c36cf403dabd7a55d6f0a47b0c1dd6029994/merged/etc/nginx/conf.d/default.conf\\\" caused \\\"not a directory\\\"\"": unknown: Are you trying to mount a directory onto a file (or vice-versa)? Check if the specified host path exists and is the expected type
```
this means docker tried to mount a directory from the host to a file in the container.
This can happen if you did not create the `nginx.conf` file.
Because there is a volume mount for it in the `docker-compose.yml`, Docker will create a folder because none exists, assuming you want to mount a folder into the container.
To fix this, create the file and restart the containers again.
#### Migration failed: commands out of sync
If you get an error like this one:
@ -158,46 +190,21 @@ To do this, first stop everything by running `sudo docker-compose down`, then re
Head over to `http://<host-ip or url>/api/v1/info` in a browser.
You should see something like this:
```json
{{< highlight json >}}
{
"version": "v0.23.0",
"frontend_url": "https://try.vikunja.io/",
"motd": "",
"link_sharing_enabled": true,
"max_file_size": "20MB",
"registration_enabled": true,
"available_migrators": [
"vikunja-file",
"ticktick",
"todoist"
],
"task_attachments_enabled": true,
"enabled_background_providers": [
"upload",
"unsplash"
],
"totp_enabled": false,
"legal": {
"imprint_url": "",
"privacy_policy_url": ""
},
"caldav_enabled": true,
"auth": {
"local": {
"enabled": true
},
"openid_connect": {
"enabled": false,
"providers": null
}
},
"email_reminders_enabled": true,
"user_deletion_enabled": true,
"task_comments_enabled": true,
"demo_mode_enabled": true,
"webhooks_enabled": true
"version": "0.13.1+19-e9bc3246ce",
"frontend_url": "http://localhost:8080/",
"motd": "test",
"link_sharing_enabled": true,
"max_file_size": "20MB",
"registration_enabled": true,
"available_migrators": [
"wunderlist",
"todoist"
],
"task_attachments_enabled": true
}
```
{{< /highlight >}}
This shows you can reach the api through the api proxy.

View File

@ -10,338 +10,29 @@ menu:
# Full docker example
This docker compose configuration will run Vikunja with a mariadb database.
It uses a proxy configuration to make it available under a domain.
This docker compose configuration will run Vikunja with backend and frontend with a mariadb as database.
It uses an nginx container or traefik on the host to proxy backend and frontend into a single port.
For all available configuration options, see [configuration]({{< ref "config.md">}}).
After registering all your users, you might also want to [disable the user registration]({{<ref "config.md">}}#enableregistration).
<div class="notification is-warning">
<b>NOTE:</b> If you intend to run Vikunja with mysql and/or to use non-latin characters
<a href="{{< ref "utf-8.md">}}">make sure your db is utf-8 compatible</a>.<br/>
All examples on this page already reflect this and do not require additional work.
</div>
{{< table_of_contents >}}
## File permissions
Vikunja runs as user `1000` and no group by default.
You can use Docker's [`--user`](https://docs.docker.com/engine/reference/run/#user) flag to change that.
You must ensure Vikunja is able to write into the `files` directory.
To do this, create the folder and chown it before starting the stack:
```
mkdir $PWD/files
chown 1000 $PWD/files
```
You'll need to do this before running any of the examples on this page.
Vikunja will not try to acquire ownership of the files folder, as that would mean it had to run as root.
## PostgreSQL
Vikunja supports postgres, mysql and sqlite as a database backend. The examples on this page use mysql with a mariadb container.
To use postgres as a database backend, change the `db` section of the examples to this:
```yaml
db:
image: postgres:16
environment:
POSTGRES_PASSWORD: changeme
POSTGRES_USER: vikunja
volumes:
- ./db:/var/lib/postgresql/data
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -h localhost -U $$POSTGRES_USER"]
interval: 2s
```
You'll also need to change the `VIKUNJA_DATABASE_TYPE` to `postgres` on the api container declaration.
## Sqlite
Vikunja supports postgres, mysql and sqlite as a database backend. The examples on this page use mysql with a mariadb container.
To use sqlite as a database backend, change the `api` section of the examples to this:
```yaml
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_JWTSECRET: <a super secure random secret>
VIKUNJA_SERVICE_PUBLICURL: http://<your public frontend url with slash>/
# Note the default path is /app/vikunja/vikunja.db.
# This config variable moves it to a different folder so you can use a volume and
# store the database file outside the container so state is persisted even if the container is destroyed.
VIKUNJA_DATABASE_PATH: /db/vikunja.db
ports:
- 3456:3456
volumes:
- ./files:/app/vikunja/files
- ./db:/db
restart: unless-stopped
```
The default path Vikunja uses for sqlite is relative to the binary, which in the docker container would be `/app/vikunja/vikunja.db`.
The `VIKUNJA_DATABASE_PATH` environment variable changes it so that the database file is stored in a volume at `/db`, to persist state across restarts.
You'll also need to remove or change the `VIKUNJA_DATABASE_TYPE` to `sqlite` on the container declaration.
You can also remove the db section.
To run the container, you need to create the directories first and make sure they have all required permissions:
```
mkdir $PWD/files $PWD/db
chown 1000 $PWD/files $PWD/db
```
<div class="notification is-warning">
<b>NOTE:</b> If you'll use your instance with more than a handful of users, we recommend using mysql or postgres.
</div>
## Example without any proxy
This example lets you host Vikunja without any reverse proxy in front of it.
This is the absolute minimum configuration you need to get something up and running.
If you want to make Vikunja available on a domain or need tls termination, check out one of the other examples.
Note that you need to change the [`VIKUNJA_SERVICE_PUBLICURL`]({{< ref "config.md" >}}#publicurl) environment variable to the public ip or hostname, including the port, that the docker host you're running this on is reachable at, prefixed with `http://`.
Because the browser you'll use to access the Vikunja frontend uses that url to make the requests, it has to be able to reach it from the outside.
<div class="notification is-warning">
<b>NOTE:</b> You must ensure Vikunja has write permissions on the `files` directory before starting the stack.
To do this, <a href="#file-permissions">check out the related commands here</a>.
</div>
```yaml
version: '3'
services:
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_PUBLICURL: http://<the public ip or host where vikunja is reachable>
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: changeme
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_SERVICE_JWTSECRET: <a super secure random secret>
ports:
- 3456:3456
volumes:
- ./files:/app/vikunja/files
depends_on:
db:
condition: service_healthy
restart: unless-stopped
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: changeme
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h localhost -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"]
interval: 2s
start_period: 30s
```
## Example with Traefik 2
This example assumes [traefik](https://traefik.io) version 2 installed and configured to [use docker as a configuration provider](https://docs.traefik.io/providers/docker/).
We also make a few assumptions here which you'll most likely need to adjust for your traefik setup:
* Your domain is `vikunja.example.com`
* The entrypoint you want to make vikunja available from is called `https`
* The tls cert resolver is called `acme`
<div class="notification is-warning">
<b>NOTE:</b> You must ensure Vikunja has write permissions on the `files` directory before starting the stack.
To do this, <a href="#file-permissions">check out the related commands here</a>.
</div>
```yaml
version: '3'
services:
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_PUBLICURL: http://<the public url where vikunja is reachable>
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: changeme
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_SERVICE_JWTSECRET: <a super secure random secret>
volumes:
- ./files:/app/vikunja/files
networks:
- web
- default
depends_on:
db:
condition: service_healthy
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.docker.network=web"
- "traefik.http.routers.vikunja.rule=Host(`vikunja.example.com`)"
- "traefik.http.routers.vikunja.entrypoints=https"
- "traefik.http.routers.vikunja.tls.certResolver=acme"
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersupersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: changeme
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h localhost -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"]
interval: 2s
start_period: 30s
networks:
web:
external: true
```
## Example with Caddy v2 as proxy
You will need the following `Caddyfile` on your host (or elsewhere, but then you'd need to adjust the proxy mount at the bottom of the compose file):
```conf
vikunja.example.com {
reverse_proxy api:3456
}
```
Note that you need to change the [`VIKUNJA_SERVICE_PUBLICURL`]({{< ref "config.md" >}}#publicurl) environment variable to the ip that the docker host you're running this on is reachable at.
Because the browser you'll use to access the Vikunja frontend uses that url to make the requests, it has to be able to reach that ip + port from the outside.
<div class="notification is-warning">
<b>NOTE:</b> You must ensure Vikunja has write permissions on the `files` directory before starting the stack.
To do this, <a href="#file-permissions">check out the related commands here</a>.
</div>
Docker Compose config:
```yaml
version: '3'
services:
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_PUBLICURL: http://<the public url where vikunja is reachable>
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: changeme
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_SERVICE_JWTSECRET: <a super secure random secret>
ports:
- 3456:3456
volumes:
- ./files:/app/vikunja/files
depends_on:
db:
condition: service_healthy
restart: unless-stopped
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: changeme
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h localhost -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"]
interval: 2s
start_period: 30s
caddy:
image: caddy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
depends_on:
- api
- frontend
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
```
## Setup on a Synology NAS
There is a proxy preinstalled in DSM, so if you want to access Vikunja from outside,
you need to prepare a proxy rule for the Vikunja service.
![Synology Proxy Settings](/docs/synology-proxy-1.png)
You should also add 2 empty folders for mariadb and vikunja inside Synology's
main docker folder:
* Docker
* vikunja
* mariadb
Synology has its own GUI for managing Docker containers, but it's easier via docker compose.
To do that, you can
* Either activate SSH and paste the adapted compose file in a terminal (using Putty or similar)
* Without activating SSH, as a "custom script" (go to Control Panel / Task Scheduler / Create / Scheduled Task / User-defined script)
* Without activating SSH, by using Portainer (you have to install it first, check out [this tutorial](https://www.portainer.io/blog/how-to-install-portainer-on-a-synology-nas) for example):
1. Go to **Dashboard / Stacks** click the button **"Add Stack"**
2. Give it the name Vikunja and paste the adapted docker compose file
3. Deploy the Stack with the "Deploy Stack" button:
![Portainer Stack deploy](/docs/synology-proxy-2.png)
The docker-compose file we're going to use is exactly the same as the [example without any proxy](#example-without-any-proxy) above.
You may want to change the volumes to match the rest of your setup.
After registering all your users, you might also want to [disable the user registration]({{<ref "config.md">}}#enableregistration).
<div class="notification is-warning">
<b>NOTE:</b> You must ensure Vikunja has write permissions on the `files` directory before starting the stack.
To do this, <a href="#file-permissions">check out the related commands here</a>.
</div>
## Redis
While Vikunja has support to use redis as a caching backend, you'll probably not need it unless you're using Vikunja with more than a handful of users.
To use redis, you'll need to add this to the config examples below:
```yaml
{{< highlight yaml >}}
version: '3'
services:
vikunja:
image: vikunja/vikunja
api:
image: vikunja/api
environment:
VIKUNJA_REDIS_ENABLED: 1
VIKUNJA_REDIS_HOST: 'redis:6379'
@ -351,4 +42,256 @@ services:
- ./files:/app/vikunja/files
redis:
image: redis
```
{{< /highlight >}}
## Example with traefik 2
This example assumes [traefik](https://traefik.io) version 2 installed and configured to [use docker as a configuration provider](https://docs.traefik.io/providers/docker/).
We also make a few assumptions here which you'll most likely need to adjust for your traefik setup:
* Your domain is `vikunja.example.com`
* The entrypoint you want to make vikunja available from is called `https`
* The tls cert resolver is called `acme`
{{< highlight yaml >}}
version: '3'
services:
api:
image: vikunja/api
environment:
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: supersecret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
volumes:
- ./files:/app/vikunja/files
networks:
- web
- default
depends_on:
- db
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.routers.vikunja-api.rule=Host(`vikunja.example.com`) && PathPrefix(`/api/v1`, `/dav/`, `/.well-known/`)"
- "traefik.http.routers.vikunja-api.entrypoints=https"
- "traefik.http.routers.vikunja-api.tls.certResolver=acme"
frontend:
image: vikunja/frontend
labels:
- "traefik.enable=true"
- "traefik.http.routers.vikunja-frontend.rule=Host(`vikunja.example.com`)"
- "traefik.http.routers.vikunja-frontend.entrypoints=https"
- "traefik.http.routers.vikunja-frontend.tls.certResolver=acme"
networks:
- web
- default
restart: unless-stopped
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersupersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: supersecret
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
command: --max-connections=1000
networks:
web:
external: true
{{< /highlight >}}
## Example with traefik 1
This example assumes [traefik](https://traefik.io) in version 1 installed and configured to [use docker as a configuration provider](https://docs.traefik.io/v1.7/configuration/backends/docker/).
{{< highlight yaml >}}
version: '3'
services:
api:
image: vikunja/api
environment:
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: supersecret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
volumes:
- ./files:/app/vikunja/files
networks:
- web
- default
depends_on:
- db
restart: unless-stopped
labels:
- "traefik.docker.network=web"
- "traefik.enable=true"
- "traefik.frontend.rule=Host:vikunja.example.com;PathPrefix:/api/v1,/dav/,/.well-known"
- "traefik.port=3456"
- "traefik.protocol=http"
frontend:
image: vikunja/frontend
labels:
- "traefik.docker.network=web"
- "traefik.enable=true"
- "traefik.frontend.rule=Host:vikunja.example.com;PathPrefix:/"
- "traefik.port=80"
- "traefik.protocol=http"
networks:
- web
- default
restart: unless-stopped
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersupersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: supersecret
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
command: --max-connections=1000
networks:
web:
external: true
{{< /highlight >}}
## Example with nginx as proxy
You'll need to save this nginx configuration on your host under `nginx.conf`
(or elsewhere, but then you'd need to adjust the proxy mount at the bottom of the compose file):
{{< highlight conf >}}
server {
listen 80;
location / {
proxy_pass http://frontend:80;
}
location ~* ^/(api|dav|\.well-known)/ {
proxy_pass http://api:3456;
client_max_body_size 20M;
}
}
{{< /highlight >}}
<div class="notification is-warning">
<b>NOTE:</b> If you change the max upload size in Vikunja's settings, you'll need to also change the <code>client_max_body_size</code> in the nginx proxy config.
</div>
`docker-compose.yml` config:
{{< highlight yaml >}}
version: '3'
services:
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: secret
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
api:
image: vikunja/api
environment:
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: secret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
volumes:
- ./files:/app/vikunja/files
depends_on:
- db
restart: unless-stopped
frontend:
image: vikunja/frontend
restart: unless-stopped
proxy:
image: nginx
ports:
- 80:80
volumes:
- ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
depends_on:
- api
- frontend
restart: unless-stopped
{{< /highlight >}}
## Example with Caddy v2 as proxy
You will need the following `Caddyfile` on your host (or elsewhere, but then you'd need to adjust the proxy mount at the bottom of the compose file):
{{< highlight conf >}}
vikunja.example.com {
reverse_proxy /api/* api:3456
reverse_proxy /.well-known/* api:3456
reverse_proxy /dav/* api:3456
reverse_proxy frontend:80
}
{{< /highlight >}}
`docker-compose.yml` config:
{{< highlight yaml >}}
version: '3'
services:
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: secret
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
restart: unless-stopped
api:
image: vikunja/api
environment:
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: secret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
volumes:
- ./files:/app/vikunja/files
depends_on:
- db
restart: unless-stopped
frontend:
image: vikunja/frontend
restart: unless-stopped
caddy:
image: caddy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
depends_on:
- api
- frontend
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile:ro
{{< /highlight >}}

View File

@ -0,0 +1,279 @@
---
date: "2019-02-12:00:00+02:00"
title: "Install Backend"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Backend
<div class="notification is-warning">
<b>NOTE:</b> If you intend to run Vikunja with mysql and/or to use non-latin characters
<a href="{{< ref "utf-8.md">}}">make sure your db is utf-8 compatible</a>.
</div>
{{< table_of_contents >}}
## Install from binary
Download a copy of Vikunja from the [download page](https://vikunja.io/en/download/) for your architecture.
{{< highlight bash >}}
wget <download-url>
{{< /highlight >}}
### Verify the GPG signature
Starting with version `0.7`, all releases are signed using pgp.
Releases from `main` will always be signed.
To validate the downloaded zip file use the signature file `.asc` and the key `FF054DACD908493A`:
{{< highlight bash >}}
gpg --keyserver keyserver.ubuntu.com --recv FF054DACD908493A
gpg --verify vikunja-0.7-linux-amd64-full.zip.asc vikunja-0.7-linux-amd64-full.zip
{{< /highlight >}}
### Set it up
Once you've verified the signature, you need to unzip it and make it executable. You'll also need to
create a symlink to it so you can execute Vikunja by typing `vikunja` on your system.
We'll install vikunja to `/opt/vikunja`, change the path where needed if you want to install it elsewhere.
{{< highlight bash >}}
mkdir -p /opt/vikunja
unzip <vikunja-zip-file> -d /opt/vikunja
chmod +x /opt/vikunja
ln -s /opt/vikunja/vikunja /usr/bin/vikunja
{{< /highlight >}}
### Systemd service
Take the following `service` file and adapt it to your needs:
{{< highlight service >}}
[Unit]
Description=Vikunja
After=syslog.target
After=network.target
# Depending on how you configured Vikunja, you may want to uncomment these:
#Requires=mysql.service
#Requires=mariadb.service
#Requires=postgresql.service
#Requires=redis.service
[Service]
RestartSec=2s
Type=simple
WorkingDirectory=/opt/vikunja
ExecStart=/usr/bin/vikunja
Restart=always
# If you want to bind Vikunja to a port below 1024 uncomment
# the two values below
###
#CapabilityBoundingSet=CAP_NET_BIND_SERVICE
#AmbientCapabilities=CAP_NET_BIND_SERVICE
[Install]
WantedBy=multi-user.target
{{< /highlight >}}
If you've installed Vikunja to a directory other than `/opt/vikunja`, you need to adapt `WorkingDirectory` accordingly.
Save the file to `/etc/systemd/system/vikunja.service`
After you've made all necessary modifications, it's time to start the service:
{{< highlight bash >}}
sudo systemctl enable vikunja
sudo systemctl start vikunja
{{< /highlight >}}
### Build from source
To build vikunja from source, see [building from source]({{< ref "build-from-source.md">}}).
### Updating
Simply replace the binary and templates with the new version, then restart Vikunja.
It will automatically run all necessary database migrations.
**Make sure to take a look at the changelog for the new version to not miss any manual steps the update may involve!**
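A sketch of an update, assuming the systemd service and the install path from above (adjust the zip file name to the release you downloaded):
{{< highlight bash >}}
sudo systemctl stop vikunja
unzip -o <vikunja-zip-file> -d /opt/vikunja
sudo systemctl start vikunja
{{< /highlight >}}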
## Docker
(Note: this assumes some familiarity with docker)
Usage with docker is pretty straightforward:
{{< highlight bash >}}
docker run -p 3456:3456 vikunja/api
{{< /highlight >}}
to run with a standard configuration.
This will expose vikunja on port `3456` on the host running the container.
You can mount a local configuration like so:
{{< highlight bash >}}
docker run -p 3456:3456 -v /path/to/config/on/host.yml:/app/vikunja/config.yml:ro vikunja/api
{{< /highlight >}}
Though it is recommended to use environment variables or `.env` files to configure Vikunja in docker.
See [config]({{< ref "config.md">}}) for a list of available configuration options.
### Files volume
By default, the container stores all files uploaded and used through vikunja inside of `/app/vikunja/files`, which is created as a docker volume.
You should mount the volume somewhere on the host to permanently store the files and not lose them if the container restarts.
### Setting user and group id of the user running vikunja
You can set the user and group id of the user running vikunja with the `PUID` and `PGID` environment variables.
This follows the pattern used by [the linuxserver.io](https://docs.linuxserver.io/general/understanding-puid-and-pgid) docker images.
This is useful to solve general permission problems when host-mounting volumes such as the volume used for task attachments.
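For example, to run the container with uid and gid `1000`:
{{< highlight bash >}}
docker run -p 3456:3456 -e PUID=1000 -e PGID=1000 vikunja/api
{{< /highlight >}}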
### Docker compose
To run the backend with a mariadb database you can use this example [docker-compose](https://docs.docker.com/compose/) file:
{{< highlight yaml >}}
version: '2'
services:
api:
image: vikunja/api:latest
environment:
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: secret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_SERVICE_JWTSECRET: <generated secret>
volumes:
- ./files:/app/vikunja/files
db:
image: mariadb:10
command: --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_ROOT_PASSWORD: supersecret
MYSQL_USER: vikunja
MYSQL_PASSWORD: secret
MYSQL_DATABASE: vikunja
volumes:
- ./db:/var/lib/mysql
{{< /highlight >}}
See [full docker example]({{< ref "full-docker-example.md">}}) for more variations of this config.
## Debian packages
Since version 0.7 Vikunja is also released as debian packages.
To install these, grab a copy from [the download page](https://vikunja.io/en/download/) and run
{{< highlight bash >}}
dpkg -i vikunja.deb
{{< /highlight >}}
This will install the backend to `/opt/vikunja`.
To configure it, use the config file in `/etc/vikunja/config.yml`.
## FreeBSD / FreeNAS
Unfortunately, we currently can't provide pre-built binaries for FreeBSD.
As a workaround, it is possible to compile vikunja for FreeBSD directly on a FreeBSD machine; a guide is available below:
*Thanks to HungrySkeleton who originally created this guide [in the forum](https://community.vikunja.io/t/freebsd-support/69/11).*
### Jail Setup
1. Create jail named ```vikunja```
2. Set jail properties to 'auto start'
3. Mount storage (```/mnt``` to ```jailData/vikunja```)
4. Start jail & SSH into it
### Installing packages
{{< highlight bash >}}
pkg update && pkg upgrade -y
pkg install nano git go gmake
go install github.com/magefile/mage
{{< /highlight >}}
### Clone vikunja repo
{{< highlight bash >}}
mkdir /mnt/GO/code.vikunja.io
cd /mnt/GO/code.vikunja.io
git clone https://code.vikunja.io/api
cd /mnt/GO/code.vikunja.io/api
{{< /highlight >}}
### Compile binaries
{{< highlight bash >}}
go install
mage build
{{< /highlight >}}
### Create folder to install backend server into
{{< highlight bash >}}
mkdir /mnt/backend
cp /mnt/GO/code.vikunja.io/api/vikunja /mnt/backend/vikunja
cd /mnt/backend
chmod +x /mnt/backend/vikunja
{{< /highlight >}}
### Set vikunja to boot on startup
{{< highlight bash >}}
nano /etc/rc.d/vikunja
{{< /highlight >}}
Then paste into the file:
{{< highlight bash >}}
#!/bin/sh
. /etc/rc.subr
name=vikunja
rcvar=vikunja_enable
command="/mnt/backend/${name}"
load_rc_config $name
run_rc_command "$1"
{{< /highlight >}}
Save and exit. Then execute:
{{< highlight bash >}}
chmod +x /etc/rc.d/vikunja
nano /etc/rc.conf
{{< /highlight >}}
Then add this line to the bottom of the file:
{{< highlight bash >}}
vikunja_enable="YES"
{{< /highlight >}}
Test that vikunja now works with
{{< highlight bash >}}
service vikunja start
{{< /highlight >}}
The API is now available through IP:
```
192.168.1.XXX:3456
```
## Configuration
See [available configuration options]({{< ref "config.md">}}).

View File

@ -0,0 +1,142 @@
---
date: "2019-02-12:00:00+02:00"
title: "Install Frontend"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Frontend
Installing the frontend is just a matter of hosting a bunch of static files somewhere.
With nginx or apache, you have to [download](https://vikunja.io/en/download/) the frontend files first.
Unzip them and store them somewhere your server can access them.
You also need to configure a rewrite condition to internally redirect all requests to `index.html` which handles all urls.
{{< table_of_contents >}}
## API URL configuration
By default, the frontend assumes it can reach the api at `/api/v1` relative to the frontend url.
This means that if you make the frontend available at, say `https://vikunja.example.com`, it tries to reach the api
at `https://vikunja.example.com/api/v1`.
In this scenario it is not possible for the frontend and the api to live on separate servers or even just separate
ports on the same server with [the use of a reverse proxy]({{< ref "reverse-proxies.md">}}).
To make configurations like this possible, the api url can be set in the `index.html` file of the frontend releases.
Just open the file with a text editor - there are comments which will explain how to set the url.
**Note:** This needs to be done again after every update.
(If you have a good idea for a better solution than this, we'd love to [hear it](https://vikunja.io/contact/))
## Docker
The docker image is based on nginx and just contains all necessary files for the frontend.
To run it, all you need is
{{< highlight bash >}}
docker run -p 80:80 vikunja/frontend
{{< /highlight >}}
which will run the docker image and expose port 80 on the host.
See [full docker example]({{< ref "full-docker-example.md">}}) for more variations of this config.
### Setting user and group id of the user running vikunja
You can set the user and group id of the user running vikunja with the `PUID` and `PGID` environment variables.
This follows the pattern used by [the linuxserver.io](https://docs.linuxserver.io/general/understanding-puid-and-pgid) docker images.
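For example, to run the frontend container with uid and gid `1000`:
{{< highlight bash >}}
docker run -p 80:80 -e PUID=1000 -e PGID=1000 vikunja/frontend
{{< /highlight >}}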
### API URL configuration in docker
When running the frontend with docker, it is possible to set the environment variable `$VIKUNJA_API_URL` to the api url.
It is therefore not needed to change the url manually inside the docker container.
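For example, with a made-up api url:
{{< highlight bash >}}
docker run -p 80:80 -e VIKUNJA_API_URL=https://vikunja.example.com/api/v1 vikunja/frontend
{{< /highlight >}}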
## NGINX
Below are two example configurations which you can put in your `nginx.conf`:
You may need to adjust `server_name` and `root` accordingly.
After configuring them, you need to reload nginx (`service nginx reload`).
### with gzip enabled (recommended)
{{< highlight conf >}}
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_min_length 256;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml;
server {
listen 80;
server_name localhost;
location / {
root /path/to/vikunja/static/frontend/files;
try_files $uri $uri/ /;
index index.html index.htm;
}
}
{{< /highlight >}}
### without gzip
{{< highlight conf >}}
server {
listen 80;
server_name localhost;
location / {
root /path/to/vikunja/static/frontend/files;
try_files $uri $uri/ /;
index index.html index.htm;
}
}
{{< /highlight >}}
## Apache
Apache needs to have `mod_rewrite` enabled for this to work properly:
{{< highlight bash >}}
a2enmod rewrite
service apache2 restart
{{< /highlight >}}
Put the following config in `/etc/apache2/sites-available/vikunja.conf`:
{{< highlight aconf >}}
<VirtualHost *:80>
ServerName localhost
DocumentRoot /path/to/vikunja/static/frontend/files
RewriteEngine On
RewriteRule ^\/?(config\.json|favicon\.ico|css|fonts|images|img|js) - [L]
RewriteRule ^(.*)$ /index.html [QSA,L]
</VirtualHost>
{{< /highlight >}}
You probably want to adjust `ServerName` and `DocumentRoot`.
Once you've customized your config, you need to enable it:
{{< highlight bash >}}
a2ensite vikunja
service apache2 reload
{{< /highlight >}}
## Updating
To update, it should be enough to download the new files and overwrite the old ones.
The paths contain hashes, so all caches are invalidated automatically.

View File

@ -11,308 +11,31 @@ menu:
# Installing
Architecturally, Vikunja is made up of two parts: [API](https://code.vikunja.io/api) and [frontend](https://code.vikunja.io/api/frontend).
Vikunja consists of two parts: [Backend](https://code.vikunja.io/api) and [frontend](https://code.vikunja.io/frontend).
While the backend is required, the frontend is not.
You don't necessarily need to have a web-frontend; using Vikunja via the [mobile app](https://code.vikunja.io/app) is totally fine.
Both are bundled into one single deployable binary (or docker container).
That means you only need to install one thing to be able to use Vikunja.
However, using the web frontend is highly recommended.
You can also:

* Use the desktop app, which is essentially the web frontend packaged for easy installation on desktop devices
* Use the mobile app only, but as of right now it only supports the very basic features of Vikunja
<div class="notification is-warning">
<b>NOTE:</b> If you intend to run Vikunja with mysql and/or to use non-latin characters
<a href="{{< ref "utf-8.md">}}">make sure your db is utf-8 compatible</a>.
</div>
Vikunja can be installed in various ways.
This document provides an overview and instructions for the different methods:
* [Installing from binary (manual)](#install-from-binary)
* [Build from source]({{< ref "build-from-source.md">}})
* [Docker](#docker)
* [Debian](#debian-packages)
* [RPM](#rpm)
* [FreeBSD](#freebsd--freenas)
* [Kubernetes]({{< ref "k8s.md" >}})
And after you've installed Vikunja, you may want to check out these other resources:
* [Configuration]({{< ref "config.md">}})
* [UTF-8 Settings]({{< ref "utf-8.md">}})
* [Backend]({{< ref "install-backend.md">}})
* [Installing from binary]({{< ref "install-backend.md#install-from-binary">}})
* [Verify the GPG signature]({{< ref "install-backend.md#verify-the-gpg-signature">}})
* [Set it up]({{< ref "install-backend.md#set-it-up">}})
* [Systemd service]({{< ref "install-backend.md#systemd-service">}})
* [Updating]({{< ref "install-backend.md#updating">}})
* [Build from source]({{< ref "install-backend.md#build-from-source">}})
* [Docker]({{< ref "install-backend.md#docker">}})
* [Debian packages]({{< ref "install-backend.md#debian-packages">}})
* [Configuration]({{< ref "config.md">}})
* [UTF-8 Settings]({{< ref "utf-8.md">}})
* [Frontend]({{< ref "install-frontend.md">}})
* [Docker]({{< ref "install-frontend.md#docker">}})
* [NGINX]({{< ref "install-frontend.md#nginx">}})
* [Apache]({{< ref "install-frontend.md#apache">}})
* [Updating]({{< ref "install-frontend.md#updating">}})
* [Reverse proxies]({{< ref "reverse-proxies.md">}})
* [Full docker example]({{< ref "full-docker-example.md">}})
* [Backups]({{< ref "backups.md">}})
## Install from binary
Download a copy of Vikunja from the [download page](https://dl.vikunja.io/vikunja) for your architecture.
```
wget <download-url>
```
### Verify the GPG signature
All releases are signed using GPG.
To validate the downloaded zip file, use the signature file `.asc` and the key `FF054DACD908493A`:
```
gpg --keyserver keyserver.ubuntu.com --recv FF054DACD908493A
gpg --verify vikunja-<vikunja version>-linux-amd64-full.zip.asc vikunja-<vikunja version>-linux-amd64-full.zip
```
### Set it up
Once you've verified the signature, you need to unzip the archive and make the binary executable.
You'll also need to create a symlink to the binary, so that you can execute Vikunja by typing `vikunja` on your system.
We'll install vikunja to `/opt/vikunja`; change the path where needed if you want to install it elsewhere.
Run these commands to install it:
```
mkdir -p /opt/vikunja
unzip <vikunja-zip-file> -d /opt/vikunja
chmod +x /opt/vikunja/vikunja
sudo ln -s /opt/vikunja/vikunja /usr/bin/vikunja
```
### Systemd service
To automatically start Vikunja when your system boots and to make sure the services it depends on are running first, you want to use an init system like systemd.
Save the following service file to `/etc/systemd/system/vikunja.service` and adapt it to your needs:
```ini
[Unit]
Description=Vikunja
After=syslog.target
After=network.target
# Depending on how you configured Vikunja, you may want to uncomment these:
#Requires=mysql.service
#Requires=mariadb.service
#Requires=postgresql.service
#Requires=redis.service
[Service]
RestartSec=2s
Type=simple
WorkingDirectory=/opt/vikunja
ExecStart=/usr/bin/vikunja
Restart=always
# If you want to bind Vikunja to a port below 1024 uncomment
# the two values below
###
#CapabilityBoundingSet=CAP_NET_BIND_SERVICE
#AmbientCapabilities=CAP_NET_BIND_SERVICE
[Install]
WantedBy=multi-user.target
```
If you've installed Vikunja to a directory other than `/opt/vikunja`, you need to adapt `WorkingDirectory` accordingly.
After you made all necessary modifications, it's time to start the service:
```
sudo systemctl enable vikunja
sudo systemctl start vikunja
```
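To verify the service came up correctly, you can check its status and logs (a sketch using standard systemd tooling):

```
sudo systemctl status vikunja
sudo journalctl -u vikunja -f
```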
### Build from source
To build vikunja from source, see [building from source]({{< ref "build-from-source.md">}}).
### Updating
[Make a backup first]({{< ref "backups.md" >}}).
Simply replace the binary with the new version, then restart Vikunja.
It will automatically run all necessary database migrations.
**Make sure to take a look at the changelog for the new version to not miss any manual steps the update may involve!**
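A rough sketch of such an update, assuming the paths from the installation steps above and `<new-vikunja-zip-file>` being the new release archive:

```
sudo systemctl stop vikunja
unzip -o <new-vikunja-zip-file> -d /opt/vikunja
sudo systemctl start vikunja
```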
## Docker
(Note: this assumes some familiarity with docker)
To get up and running quickly, use this command:
```
mkdir $PWD/files $PWD/db
chown 1000 $PWD/files $PWD/db
docker run -p 3456:3456 -v $PWD/files:/app/vikunja/files -v $PWD/db:/db vikunja/vikunja
```
This will expose vikunja on port `3456` on the host running the container and use sqlite as database backend.
**Note**: The container runs as the user `1000` and no group by default.
You can use Docker's [`--user`](https://docs.docker.com/engine/reference/run/#user) flag to change that.
Make sure the new user has required permissions on the `db` and `files` folder.
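For example, a sketch running the container as user and group `1000` explicitly (adjust the IDs to whatever owns your folders):

```
docker run -p 3456:3456 --user 1000:1000 -v $PWD/files:/app/vikunja/files -v $PWD/db:/db vikunja/vikunja
```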
You can mount a local configuration like so:
```
mkdir $PWD/files $PWD/db
chown 1000 $PWD/files $PWD/db
docker run -p 3456:3456 -v /path/to/config/on/host.yml:/app/vikunja/config.yml:ro -v $PWD/files:/app/vikunja/files -v $PWD/db:/db vikunja/vikunja
```
Though it is recommended to use environment variables or `.env` files to configure Vikunja in docker.
See [config]({{< ref "config.md">}}) for a list of available configuration options.
Check out the [docker examples]({{<ref "full-docker-example.md">}}) for more advanced configuration using mysql / postgres and a reverse proxy.
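For example, a minimal sketch using environment variables instead of a mounted config file (the variable names follow the `VIKUNJA_` scheme also used in the docker compose examples; the values are placeholders):

```
docker run -p 3456:3456 \
  -e VIKUNJA_SERVICE_JWTSECRET=<your-random-secret> \
  -e VIKUNJA_SERVICE_PUBLICURL=https://vikunja.example.com/ \
  -v $PWD/files:/app/vikunja/files \
  -v $PWD/db:/db \
  vikunja/vikunja
```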
### Files volume
By default, the container stores all files uploaded and used through vikunja inside of `/app/vikunja/files` which is created as a docker volume.
You should mount the volume somewhere on the host to permanently store the files, so you don't lose them if the container restarts.
### Docker compose
Check out the [docker examples]({{<ref "full-docker-example.md">}}) for more advanced configuration using docker compose.
## Debian packages
Vikunja is available as deb package for installation on debian-like systems.
To install these, grab a `.deb` file from [the download page](https://dl.vikunja.io/vikunja) and run
```
dpkg -i vikunja.deb
```
This will install Vikunja to `/opt/vikunja`.
To configure it, use the config file in `/etc/vikunja/config.yml`.
## RPM
Vikunja is available as rpm package for installation on Fedora, CentOS and others.
To install these, grab a `.rpm` file from [the download page](https://dl.vikunja.io/vikunja) and run
```
rpm -i vikunja.rpm
```
To configure Vikunja, use the config file in `/etc/vikunja/config.yml`.
## FreeBSD / FreeNAS
Unfortunately, we currently can't provide pre-built binaries for FreeBSD.
As a workaround, it is possible to compile vikunja for FreeBSD directly on a FreeBSD machine; a guide is available below:
*Thanks to HungrySkeleton who originally created this guide [in the forum](https://community.vikunja.io/t/freebsd-support/69/11).*
### Jail Setup
1. Create a jail named `vikunja`
2. Set jail properties to 'auto start'
3. Mount storage (`/mnt` to `jailData/vikunja`)
4. Start jail & SSH into it
### Installing packages
```
pkg update && pkg upgrade -y
pkg install nano git go gmake
go install github.com/magefile/mage
```
### Clone vikunja repo
```
mkdir /mnt/GO/code.vikunja.io
cd /mnt/GO/code.vikunja.io
git clone https://code.vikunja.io/vikunja
cd vikunja
```
**Note:** Check out the version you want to build with `git checkout VERSION` - replace `VERSION` with the version you want to use.
If you don't do this, you'll build the [latest unstable build]({{< ref "versions.md">}}), which might contain bugs.
### Compile binaries
```
cd frontend
pnpm install
pnpm run build
cd ..
mage build
```
### Create folder to install Vikunja into
```
mkdir /mnt/vikunja
cp /mnt/GO/code.vikunja.io/vikunja/vikunja /mnt/vikunja
cd /mnt/vikunja
chmod +x /mnt/vikunja/vikunja
```
### Set vikunja to boot on startup
```
nano /etc/rc.d/vikunja
```
Then paste into the file:
```
#!/bin/sh
. /etc/rc.subr
name=vikunja
rcvar=vikunja_enable
command="/mnt/vikunja/${name}"
load_rc_config $name
run_rc_command "$1"
```
Save and exit. Then execute:
```
chmod +x /etc/rc.d/vikunja
nano /etc/rc.conf
```
Then add this line to the bottom of the file:
```
vikunja_enable="YES"
```
Test that vikunja now works with
```
service vikunja start
```
Vikunja is now available at the jail's IP address:
```
192.168.1.XXX:3456
```
## Other installation resources
* [Docker Compose is MUCH Easier Than you Think - Let's Install Vikunja](https://www.youtube.com/watch?v=fGlz2PkXjuo) (Youtube)
* [Setup Vikunja using Docker Compose - Homelab Wiki](https://thehomelab.wiki/books/docker/page/setup-vikunja-using-docker-compose)
* [A Closer look at Vikunja - Email Notifications - Enable or Disable Registrations - Allow Attachments](https://www.youtube.com/watch?v=47wj9pRT6Gw) (Youtube)
* [Install Vikunja in Docker for self-hosted Task Tracking](https://smarthomepursuits.com/install-vikunja-in-docker-for-self-hosted-task-tracking/)
* [Self-Hosted To-Do List with Vikunja in Docker](https://www.youtube.com/watch?v=DqyqDWpEvKI) (Youtube)
* [Vikunja self-hosted (step by step)](https://nguyenminhhung.com/vikunja-self-hosted-step-by-step/)
* [How to Install Vikunja on Your Synology NAS](https://mariushosting.com/how-to-install-vikunja-on-your-synology-nas/)
## Configuration
See [available configuration options]({{< ref "config.md">}}).
## Default Password
After successfully installing Vikunja, there is no default user or password.
You only need to register a new account and set all the details when creating it.
---
title: "Hosting Vikunja with k8s"
date: 2022-08-12T13:41:48+02:00
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Hosting Vikunja with k8s
We have an official Helm Chart for Vikunja.
Check out [the repo](https://kolaente.dev/vikunja/helm-chart/) for more information about how to use it.
## Third-party Helm Charts
There are two third-party Helm-Charts which can be used to host Vikunja with k8s:
* [Truecharts](https://truecharts.org/charts/stable/vikunja/)
* [k8s at Home](https://github.com/k8s-at-home/charts)
---
date: "2023-03-09:00:00+02:00"
title: "Migration from third-party services"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
weight: 5
---
# Migration from third-party services
There are several importers available for third-party services like Trello, Microsoft To Do or Todoist.
All available migration options can be found [here](https://kolaente.dev/vikunja/vikunja/src/branch/main/config.yml.sample#L218).
You can develop migrations for more services, see the [documentation]({{< ref "../development/migration.md">}}) for more info.
{{< table_of_contents >}}
## Trello
### Config Setup
Log into Trello and navigate to the [site](https://trello.com/app-key) to manage your API keys.
Save your `Personal Key` for later and add your Vikunja domain to the Allowed Origins list.
Create a `config.yml` file based on [default config file](https://kolaente.dev/vikunja/vikunja/src/branch/main/config.yml.sample) if you haven't already.
- Copy the [Trello options](https://kolaente.dev/vikunja/vikunja/src/branch/main/config.yml.sample#L233) from the default config file
- Set `enable` to true
- Set `key` to your [trello API key](https://trello.com/app-key)
- Replace `<frontend url>` in `redirecturl` with your url
### Config Loading
To load the config with Vikunja, see the [installation]({{< ref "install.md">}}) documentation for instructions to load the `config.yml` file and start Vikunja.
### Config Loading with Docker Compose
In case you are using Docker Compose you need to edit `docker-compose.yml` to load `config.yml`.
Mount the `config.yml` file into the Vikunja container by adding the following line to the container's volumes, replacing `./path/to/config.yml` with the relative path from your `docker-compose.yml` to your `config.yml`.
```yaml
volumes:
- ./path/to/config.yml:/etc/vikunja/config.yml
```
After all the setup is done, start Vikunja as shown in the [Docker Compose setup]({{< ref "full-docker-example.md">}}).
### Start the Migration Process
Log in, and navigate to Settings > Import from other services. In the list of available third-party services, there should be a Trello icon now.
If not, ensure that you are properly loading your config file. Refer to the Vikunja log to see if the config file was loaded or not.
In case the config file was loaded, and there is no Trello icon, make sure your [config setup](#config-setup) is correct.
Click on Trello and on Get Started. This will redirect you to Trello where you need to allow Vikunja Migration to access your account. In case there is an error when being directed to Trello, make sure that your Vikunja domain is in your Trello Allowed Origins list.
Once this is done, you will be redirected to Vikunja which should tell you that the migration is in progress now. Note that this can take up to several hours depending on the amount of boards in your Trello account.
---
date: "2022-08-09:00:00+02:00"
title: "OpenID example configurations"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# OpenID example configurations
On this page you will find examples about how to set up Vikunja with a third-party OAuth 2.0 provider using OpenID Connect.
To add another example, please [edit this document](https://kolaente.dev/vikunja/vikunja/src/branch/main/docs/content/doc/setup/openid-examples.md) and send a PR.
{{< table_of_contents >}}
## Authelia
Vikunja Config:
```yaml
openid:
enabled: true
redirecturl: https://vikunja.mydomain.com/auth/openid/ <---- slash at the end is important
providers:
- name: Authelia
authurl: https://login.mydomain.com
clientid: <vikunja-id>
clientsecret: <vikunja secret>
```
Authelia config:
```yaml
- id: <vikunja-id>
description: Vikunja
secret: <vikunja secret>
redirect_uris:
- https://vikunja.mydomain.com/auth/openid/authelia
scopes:
- openid
- email
- profile
```
## Google / Google Workspace
Vikunja Config:
```yaml
openid:
enabled: true
redirecturl: https://vikunja.mydomain.com/auth/openid/ <---- slash at the end is important
providers:
- name: Google
authurl: https://accounts.google.com
clientid: <google-oauth-client-id>
clientsecret: <google-oauth-client-secret>
```
Google config:
- Navigate to `https://console.cloud.google.com/apis/credentials` in the target project
- Create a new OAuth client ID
- Configure an authorized redirect URI of `https://vikunja.mydomain.com/auth/openid/google`
Note that there currently seems to be no way to stop creation of new users, even when `enableregistration` is `false` in the configuration. This means that this approach works well only with an "Internal Organization" app for Google Workspace, which limits the allowed users to organizational accounts only. External / public applications will potentially allow every Google user to register.
## Keycloak
Vikunja Config:
```yaml
openid:
enabled: true
redirecturl: https://vikunja.mydomain.com/auth/openid/ <---- slash at the end is important
providers:
- name: Keycloak
authurl: https://keycloak.mydomain.com/realms/<realm-name>
logouturl: https://keycloak.mydomain.com/realms/<realm-name>/protocol/openid-connect/logout
clientid: <vikunja-id>
clientsecret: <vikunja secret>
```
Keycloak Config:
- Navigate to the keycloak instance
- Create a new client with the type `OpenID Connect` and a unique ID.
- Set `Client authentication` to On
- Set `Root Url` to `https://vikunja.mydomain.com`
- Set `Valid redirect URIs` to `/auth/openid/keycloak`
- Create the client, then navigate to the credentials tab and copy the `Client secret`
## Authentik
Authentik Config:
- Create a new Provider called "Vikunja" in Authentik
- Set the `Redirect URIs/Origins (RegEx)` to `https://vikunja.mydomain.com/auth/openid/authentik`
- Copy the Client ID and Client Secret
Vikunja Config:
```yaml
auth:
openid:
enabled: true
redirecturl: "https://vikunja.mydomain.com/auth/openid/"
providers:
- name: authentik
authurl: "https://authentik.mydomain.com/application/o/vikunja"
logouturl: "https://authentik.mydomain.com/application/o/vikunja/end-session/"
clientid: "" # copy from Authentik
clientsecret: "" # copy from Authentik
```
**Note:** The `authurl` that Vikunja requires is not the `Authorize URL` that you can see in the Provider.
OpenID Discovery is used to find the correct endpoint to use automatically, by accessing the `OpenID Configuration URL` (usually `https://authentik.mydomain.com/application/o/vikunja/.well-known/openid-configuration`).
Use this URL without the `.well-known/openid-configuration` as the `authurl`.
Typically this URL can be found in the metadata section within your identity provider.
---
date: "2022-08-09:00:00+02:00"
title: "OpenID"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# OpenID
Vikunja allows for authentication with an external identity source such as Authentik, Keycloak or similar via the
[OpenID Connect](https://openid.net/developers/specs/) standard.
{{< table_of_contents >}}
## OpenID Connect Overview
OpenID Connect is a standardized identity layer built on top of the more generic OAuth 2.0 specification, simplifying interaction between the involved parties significantly.
While the [OpenID specification](https://openid.net/specs/openid-connect-core-1_0.html#Overview) is worth a read, we summarize the most important basics here.
The involved parties are:
- **Resource Owner:** typically the end-user
- **Resource Server:** the application server handling requests from the client, the Vikunja API in our case
- **Client:** the application or client accessing the RS on behalf of the RO. Vikunja web frontend or any of the apps
- **Authorization Server:** the server verifying the user identity and issuing tokens. These docs also use the words `OAuth 2.0 provider`, `Identity Provider` interchangeably.
After the user is authenticated, the provider issues a token to the user, containing various claims.
There are different types of tokens (ID token, access token, refresh token), and all of them are created as [JSON Web Tokens](https://www.rfc-editor.org/info/rfc7519).
Claims in turn are assertions containing information about the token bearer, usually the user.
**Scopes** are requested by the client when redirecting the end-user to the Authorization Server for authentication, and indirectly control which claims are included in the resulting tokens.
There are certain default scopes, but it's also possible to define custom scopes, which are used by the feature that automatically assigns users to teams.
## Supported and required claims
Vikunja only requires a few claims to be present in the ID token to successfully authenticate the user.
Additional claims can be added though to customize behaviour during user creation.
The following table gives an overview about the claims supported by Vikunja. The scope column lists the scope that should request the claim according to the [OpenID Connect Standard](https://openid.net/specs/openid-connect-core-1_0.html#ScopeClaims). It omits the claims such as `sub` or `issuer` required by the `openid` scope, which must always be present.
| Claim | Type | Scope | Comment |
| ------|------|-------|---------|
| email | required | email | Sets the email address of the user. Taken from the `userinfo` endpoint if not present in ID token. User creation fails if claim not present and userinfo lookup fails. |
| name | optional | profile | Sets the display name of the user. Taken from the `userinfo` endpoint if not present in ID token. |
| preferred_username | optional | profile | Sets the username of the user. Taken from the `userinfo` endpoint if not present in ID token. If this also doesn't contain the claim, use the `nickname` claim from `userinfo` instead. If that one is not available either, the username is auto-generated by Vikunja. |
| vikunja_groups | optional | N/A | Can be used to automatically assign users to teams. See below for a more detailed explanation about the expected format and implementation examples. |
If one of the claims `email`, `name` or `preferred_username` is missing from the ID token, Vikunja will attempt to query the `userinfo` endpoint to obtain the information from there.
## Configuring OIDC Authentication
To achieve authentication via an external provider, it is required to (a) configure a confidential Client on your OAuth 2.0 provider and (b) configure Vikunja to authenticate against this provider.
[Example configurations]({{< ref "openid-examples.md">}}) are provided for various different identity providers, below you can find generic guides though.
OpenID Connect defines various flow types indicating how exactly the interaction between the involved parties work, Vikunja makes use of the standard **Authorization Code Flow**.
### Step 1: Configure your Authorization Server
The first step is to configure the Authorization Server to correctly handle requests coming from Vikunja.
In general, this involves the following steps at a minimum:
- Create a confidential client and obtain the client ID and client secret
- Configure (whitelist) redirect URLs that can be used by Vikunja
- Make sure the required scopes (`openid profile email` are the default scopes used by Vikunja) are supported
- Optional: configure an additional scope for automatic team assignment, see below for details
More detailed instructions for various different identity providers can be [found here]({{< ref "openid-examples.md">}})
### Step 2: Configure Vikunja
Vikunja has to be configured to use the identity provider. Please note that there is currently no option to configure these settings via environment variables; they have to be defined using the configuration file. The configuration schema is as follows:
```yaml
auth:
openid:
enabled: true
redirecturl: https://vikunja.mydomain.com/auth/openid/ <---- slash at the end is important
providers:
- name: <provider-name>
authurl: <auth-url> <----- Used for OIDC Discovery, usually the issuer
clientid: <vikunja client-id>
clientsecret: <vikunja client-secret>
scope: openid profile email
```
The value for `authurl` can be obtained from the metadata of your provider.
Note that the `authurl` is used for [OIDC Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html).
Typically, you'll want to use the `issuer` URL as found in the provider metadata.
The values for `clientid` and `clientsecret` are typically obtained when configuring the client.
The scope usually doesn't need to be specified or changed, unless you want to configure the automatic team assignment.
Optionally, it is possible to disable local authentication and therefore force users to log in via OpenID Connect:
```yaml
auth:
local:
enabled: false
```
## Automatically assign users to teams
Starting with version 0.24.0, Vikunja is capable of automatically adding users to a team based on OIDC claims added by the identity provider.
If configured, Vikunja will sync teams, automatically create new ones and make sure the members are part of the configured teams.
Teams which exist only because they were created from oidc attributes are not editable in Vikunja.
To distinguish between teams created in Vikunja and teams generated automatically via oidc, generated teams have an `oidcID` assigned internally.
Within the UI, the teams created through OIDC get a `(OIDC)` suffix to make them distinguishable from locally created teams.
On a high level, you need to make sure that the **ID token** issued by your identity provider contains a `vikunja_groups` claim, following the structure defined below.
It depends on the provider being used as well as the preferences of the administrator how this is achieved.
Typically you'd want to request an additional scope (e.g. `vikunja_scope`) which then triggers the identity provider to add the claim.
If the `vikunja_groups` is part of the **ID token**, Vikunja will start the procedure and import teams and team memberships.
The minimal claim structure expected by Vikunja is as follows:
```json
{
"vikunja_groups": [
{
"name": "team 1",
"oidcID": 33349
},
{
"name": "team 2",
"oidcID": 35933
}
]
}
```
It is also possible to pass the `description` and the `isPublic` flag as optional parameters. If not present, the description will be empty and project visibility defaults to false.
```json
{
"vikunja_groups": [
{
"name": "team 3",
"oidcID": 33349,
"description": "My Team Description",
"isPublic": true
}
]
}
```
For each team, you need to define a team `name` and an `oidcID`, where the `oidcID` can be any string with a length of less than 250 characters.
The `oidcID` is used to uniquely identify the team, so please make sure to keep this unique.
Below you'll find two example implementations for Authentik and Keycloak.
If you've successfully implemented this with another identity provider, please let us know and submit a PR to improve the docs.
### Setup in Authentik
To configure automatic team management through Authentik, we assume you have already [set up Authentik]({{< ref "openid-examples.md">}}#authentik) as an OIDC provider for authentication with Vikunja.
To use Authentik's group assignment feature, follow these steps:
1. Edit [your config]({{< ref "config.md">}}) to include the following scopes: `openid profile email vikunja_scope`
2. Open `<your authentik url>/if/admin/#/core/property-mappings`
3. Create a new property mapping called `vikunja_scope` as scope mapping. There is a field `expression` to enter python expressions that will be delivered with the oidc token.
4. Write a small script like the following to add group information to `vikunja_scope`:
```python
groupsDict = {"vikunja_groups": []}
for group in request.user.ak_groups.all():
groupsDict["vikunja_groups"].append({"name": group.name, "oidcID": group.num_pk})
return groupsDict
```
5. In Authentik's menu on the left, go to Applications > Providers > Select the Vikunja provider. Then click on "Edit", on the bottom open "Advanced protocol settings", select the newly created property mapping under "Scopes". Save the provider.
Now when you log into Vikunja via Authentik it will show you a list of scopes you are claiming.
You should see the description you entered on the OIDC provider's admin area.
Proceed to vikunja and open the teams page in the sidebar menu.
You should see "(OIDC)" written next to each team you were assigned through OIDC.
### Setup in Keycloak
The kind people from Makerspace Darmstadt e.V. have written [a guide on how to create a mapper for Vikunja here](https://github.com/makerspace-darmstadt/keycloak-vikunja-mapper).
## Use cases
All examples assume one team called "Team 1" to be configured within your provider.
1. *Token delivers team.name + team.oidcID and Vikunja team does not exist:* \
New team will be created called "Team 1" with attribute oidcID: "33929"
2. *In Vikunja Team with name "team 1" already exists in vikunja, but has no oidcID set:* \
new team will be created called "team 1" with attribute oidcID: "33929"
3. *In Vikunja Team with name "team 1" already exists in vikunja, but has different oidcID set:* \
new team will be created called "team 1" with attribute oidcID: "33929"
4. *In Vikunja Team with oidcID "33929" already exists in vikunja, but has different name than "team1":* \
new team will be created called "team 1" with attribute oidcID: "33929"
5. *Scope vikunja_scope is not set:* \
nothing happens
6. *oidcID is not set:* \
You'll get the error "Custom Scope malformed": \
"The custom scope set by the OIDC provider is malformed. Please make sure the openid provider sets the data correctly for your scope. Check especially to have set an oidcID."
7. *In Vikunja I am in "team 3" with oidcID "", but the token does not deliver any data for "team 3":* \
You will stay in team 3 since it was not set by the oidc provider
8. *In Vikunja I am in "team 3" with oidcID "12345", but the token does not deliver any data for "team 3"*:\
You will be signed out of all teams, which have an oidcID set and are not contained in the token.
Especially if you've been the last team member, the team will be deleted.
---
date: "2019-02-12:00:00+02:00"
title: "Reverse Proxy"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Setup behind a reverse proxy
These examples assume you have an instance of Vikunja running on your server listening on port `3456`.
If you've changed this setting, you need to update the server configurations accordingly.
{{< table_of_contents >}}
## NGINX
You may need to adjust `server_name` and `root` accordingly.
```conf
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://localhost:3456;
client_max_body_size 20M;
}
}
```
<div class="notification is-warning">
<b>NOTE:</b> If you change the max upload size in Vikunja's settings, you'll need to also change the <code>client_max_body_size</code> in the nginx proxy config.
</div>
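A quick way to check that the proxy actually forwards requests to Vikunja is to request the api info endpoint through it (a sketch; replace the domain with yours):

```bash
curl -I http://vikunja.your-domain.com/api/v1/info
```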
## NGINX Proxy Manager (NPM)
Following the [Docker Walkthrough]({{< ref "docker-start-to-finish.md" >}}) guide, you should be able to get Vikunja to work via HTTP connection to your server IP.
From there, all you have to do is adjust the following things:
### In `docker-compose.yml`
1. Change `VIKUNJA_SERVICE_PUBLICURL:` to your desired domain, including `https://` and a trailing `/`.
2. Expose your desired port on host under `ports:`.
For example:
```yaml
vikunja:
image: vikunja/vikunja
environment:
VIKUNJA_SERVICE_PUBLICURL: https://vikunja.your-domain.com/ # change vikunja.your-domain.com to your desired domain/subdomain.
VIKUNJA_DATABASE_HOST: db
VIKUNJA_DATABASE_PASSWORD: secret
VIKUNJA_DATABASE_TYPE: mysql
VIKUNJA_DATABASE_USER: vikunja
VIKUNJA_DATABASE_DATABASE: vikunja
VIKUNJA_SERVICE_JWTSECRET: <your-random-secret>
ports:
- 3456:3456 # Change 3456 on the left to the port of your choice.
volumes:
- ./files:/app/vikunja/files
depends_on:
- db
restart: unless-stopped
```
### In your DNS provider
Add an `A` record that points to your server IP.
You are of course free to change them to whatever domain/subdomain you desire and modify the `docker-compose.yml` accordingly.
(Tested on Cloudflare DNS. Settings differ between DNS providers; in this case the end result should be `vikunja.your-domain.com`.)
### In Nginx Proxy Manager
Add a Proxy Host as you normally would, and you don't have to add anything extra in Advanced.
Under `Details`:
```
Domain Names:
vikunja.your-domain.com
Scheme:
http
Forward Hostname/IP:
your-server-ip
Forward Port:
3456
Cached Assets:
Optional.
Block Common Exploits:
Toggled.
Websockets Support:
Toggled.
```
Under `SSL`:
```
SSL Certificate:
However you prefer.
Force SSL:
Toggled.
HTTP/2 Support:
Toggled.
HSTS Enabled:
Toggled.
HSTS Subdomains:
Toggled.
Use a DNS Challenge:
Not toggled.
Email Address for Let's Encrypt:
your-email@email.com
```
Your Vikunja service should now work and your HTTPS frontend should be able to reach the API after running `docker-compose up`.
## Apache
Put the following config in `/etc/apache2/sites-available/vikunja.conf`:
```aconf
<VirtualHost *:80>
ServerName localhost
<Proxy *>
Order Deny,Allow
Allow from all
</Proxy>
ProxyPass / http://localhost:3456/
ProxyPassReverse / http://localhost:3456/
</VirtualHost>
```
**Note:** The apache modules `proxy`, `proxy_http` and `rewrite` must be enabled for this.
## Caddy
Use the following Caddyfile to get Vikunja up and running:
```conf
vikunja.domainname.tld {
handle /* {
reverse_proxy 127.0.0.1:3456
}
}
```
---
date: "2019-02-12:00:00+02:00"
title: "Reverse Proxy"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Setup behind a reverse proxy which also serves the frontend
These examples assume you have an instance of the backend running on your server listening on port `3456`.
If you've changed this setting, you need to update the server configurations accordingly.
{{< table_of_contents >}}
## NGINX
Below are two example configurations which you can put in your `nginx.conf`:
You may need to adjust `server_name` and `root` accordingly.
### with gzip enabled (recommended)
{{< highlight conf >}}
gzip on;
gzip_disable "msie6";
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_min_length 256;
gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/vnd.ms-fontobject application/x-font-ttf font/opentype image/svg+xml;
server {
listen 80;
server_name localhost;
location / {
root /path/to/vikunja/static/frontend/files;
try_files $uri $uri/ /;
index index.html index.htm;
}
location ~* ^/(api|dav|\.well-known)/ {
proxy_pass http://localhost:3456;
client_max_body_size 20M;
}
}
{{< /highlight >}}
<div class="notification is-warning">
<b>NOTE:</b> If you change the max upload size in Vikunja's settings, you'll need to also change the <code>client_max_body_size</code> in the nginx proxy config.
</div>
### without gzip
{{< highlight conf >}}
server {
listen 80;
server_name localhost;
location / {
root /path/to/vikunja/static/frontend/files;
try_files $uri $uri/ /;
index index.html index.htm;
}
location ~* ^/(api|dav|\.well-known)/ {
proxy_pass http://localhost:3456;
client_max_body_size 20M;
}
}
{{< /highlight >}}
<div class="notification is-warning">
<b>NOTE:</b> If you change the max upload size in Vikunja's settings, you'll need to also change the <code>client_max_body_size</code> in the nginx proxy config.
</div>
## Apache
Put the following config in `/etc/apache2/sites-available/vikunja.conf`:
{{< highlight aconf >}}
<VirtualHost *:80>
ServerName localhost
<Proxy *>
Order Deny,Allow
Allow from all
</Proxy>
ProxyPass /api http://localhost:3456/api
ProxyPassReverse /api http://localhost:3456/api
ProxyPass /dav http://localhost:3456/dav
ProxyPassReverse /dav http://localhost:3456/dav
ProxyPass /.well-known http://localhost:3456/.well-known
ProxyPassReverse /.well-known http://localhost:3456/.well-known
DocumentRoot /var/www/html
RewriteEngine On
RewriteRule ^\/?(config\.json|favicon\.ico|css|fonts|images|img|js|api|dav|\.well-known) - [L]
RewriteRule ^(.*)$ /index.html [QSA,L]
</VirtualHost>
{{< /highlight >}}
**Note:** The apache modules `proxy`, `proxy_http` and `rewrite` must be enabled for this.
For more details see the [frontend apache configuration]({{< ref "install-frontend.md#apache">}}).
---
title: "Running Vikunja in a subdirectory"
date: 2022-09-23T12:15:04+02:00
draft: false
menu:
sidebar:
parent: "setup"
---
# Running Vikunja in a subdirectory
Running Vikunja in a subdirectory is not supported out of the box.
However, you can still run it in a subdirectory but need to build the frontend yourself.
## Frontend
First, make sure you're able to build the frontend from source.
Check [the guide about building from source]({{< ref "build-from-source.md">}}#frontend) about that.
### Dynamically set with build command
Run the build with the `VIKUNJA_FRONTEND_BASE` variable specified.
```
VIKUNJA_FRONTEND_BASE=/SUBPATH/ pnpm run build
```
Where `SUBPATH` is the subdirectory you want to run Vikunja on.
### Set via .env.local
* Copy `.env.local.example` to `.env.local`
* Uncomment `VIKUNJA_FRONTEND_BASE` and set it to the desired path, e.g. `/subpath/`.
After saving, build Vikunja as normal.
```
pnpm run build
```
Once you have the frontend built, you can proceed to build the binary as outlined in [building from source]({{< ref "build-from-source.md">}}#api).
## API
If you're not using a reverse proxy you're good to go.
Simply configure the api url in the frontend as you normally would.
If you're using a reverse proxy you'll need to adjust the paths so that the api is available at `/SUBPATH/api/v1`.
You can check if everything is working correctly by opening `/SUBPATH/api/v1/info` in a browser.
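As a sketch (domain and subpath are placeholders):

```
curl https://vikunja.example.com/SUBPATH/api/v1/info
```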
---
title: "Typesense"
date: 2023-09-29T12:23:55+02:00
draft: false
menu:
sidebar:
parent: "setup"
---
# Use Typesense for enhanced search capabilities
Vikunja supports using [Typesense](https://typesense.org/) for a better search experience.
Typesense allows fast fulltext search including fuzzy matching support.
It may return different results than what you'd get with a database-only search, but generally, the results are more relevant to what you're looking for.
This document explains how to set up and use Typesense with Vikunja.
## Setup
1. First, install Typesense on your system. Refer to [their documentation](https://typesense.org/docs/guide/install-typesense.html) for specific instructions.
2. Once Typesense is available on your system and reachable by Vikunja, add the relevant configuration keys to your Vikunja config. [Check out the docs article about this]({{< ref "config.md#typesense">}}).
3. Index all tasks currently in Vikunja. To do that, run the `vikunja index` command with the api binary (see the example below). This may take a while, depending on the size of your instance.
4. Restart the api. From now on, all task changes will be automatically indexed in Typesense.
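As a sketch, when running Vikunja in docker, indexing could look like this (the container name is a placeholder):

```
docker exec <name of the vikunja container> /app/vikunja/vikunja index
```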
# UTF-8 Settings
Vikunja itself is always fully capable of handling utf-8 characters.
However, your database might not be.
Vikunja itself will work just fine until you want to use non-latin characters in your tasks/projects/etc.
On this page, you will find information about how to fully ensure non-latin characters like *aüäß* or emojis work with your installation.
{{< table_of_contents >}}
To find out if your db supports utf-8, run the following in a shell or similar, assuming the database
you're using for vikunja is called `vikunja`:
{{< highlight sql >}}
SELECT default_character_set_name FROM information_schema.SCHEMATA WHERE schema_name = 'vikunja';
{{< /highlight >}}
This will get you a result like the following:
Before attempting any conversion, please [back up your database]({{< ref "backups.md">}}).
### 1. Create a pre-conversion script
Copy the following sql statements into a file called `preAlterTables.sql` and replace all occurrences of `vikunja` with the name of your database:
{{< highlight sql >}}
use information_schema;
SELECT concat("ALTER DATABASE `",table_schema,"` CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;") as _sql
FROM `TABLES` where table_schema like 'vikunja' and TABLE_TYPE='BASE TABLE' group by table_schema;
SELECT concat("ALTER TABLE `",table_schema,"`.`",table_name, "` CHANGE `",column_name,"` `",column_name,"` ",data_type,"(",character_maximum_length,") CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci",IF(is_nullable="YES"," NULL"," NOT NULL"),";") as _sql
FROM `COLUMNS` where table_schema like 'vikunja' and data_type in ('varchar','char');
SELECT concat("ALTER TABLE `",table_schema,"`.`",table_name, "` CHANGE `",column_name,"` `",column_name,"` ",data_type," CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci",IF(is_nullable="YES"," NULL"," NOT NULL"),";") as _sql
FROM `COLUMNS` where table_schema like 'vikunja' and data_type in ('text','tinytext','mediumtext','longtext');
{{< /highlight >}}
### 2. Run the pre-conversion script
Running this will create the actual migration script for your particular database structure and save it in a file called `alterTables.sql`:
{{< highlight bash >}}
mysql -uroot < preAlterTables.sql | egrep '^ALTER' > alterTables.sql
{{< /highlight >}}
### 3. Convert the database
At this point converting is just a matter of executing the previously generated sql script:
{{< highlight bash >}}
mysql -uroot < alterTables.sql
{{< /highlight >}}
### 4. Verify it was successfully converted
If everything worked as intended, your db collation should now look like this:
{{< highlight sql >}}
SELECT default_character_set_name FROM information_schema.SCHEMATA WHERE schema_name = 'vikunja';
{{< /highlight >}}
Should get you:
---
date: "2022-07-07:00:00+02:00"
title: "Versions"
draft: false
type: "doc"
menu:
sidebar:
parent: "setup"
---
# Vikunja Versions
Vikunja api is available in two different release flavors.
{{< table_of_contents >}}
## Stable
Stable releases have a fixed version number like `0.18.2` and are published at irregular intervals whenever a new version is ready.
They receive few bugfixes and security patches.
We use [Semantic Versioning](https://semver.org) for these releases.
## Unstable
Unstable versions are built every time a PR is merged or a commit to the main development branch is made.
As such, they contain the current development code and are more likely to have bugs.
There might be multiple new such builds a day.
Versions contain the last stable version, the number of commits since then and the commit the currently running binary was built from.
They look like this: `v0.18.1+269-5cc4927b9e`
Since a release is also cut from the main branch at some point, features from unstable will eventually become available in stable releases.
At the point in time of a new version release, the unstable build is the exact same thing.
The demo instance at [try.vikunja.io](https://try.vikunja.io) automatically updates and always runs the last unstable build.
## Switching between versions
First you should create a backup of your current setup!
Switching between versions is the same process as [upgrading]({{< ref install.md >}}#updating).
Simply replace the stable binary with an unstable one or vice-versa.
For installations using docker, it is as simple as using the `unstable` or `latest` tag to switch between versions.
**Note:** While switching from stable to unstable should work without any problem, switching back might work but is not recommended and might break your instance.
To switch from unstable back to stable the best way is to wait for the next stable release after the used unstable build and then upgrade to that.
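For example, a sketch of switching a docker setup to the unstable build (a compose setup works the same way by changing the image tag):

```
docker pull vikunja/vikunja:unstable
# then recreate the container with the same volumes and environment as before,
# just using the :unstable tag instead of :latest
```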
# API Documentation
You can find the api docs under `http://vikunja.tld/api/v1/docs` of your instance.
A public instance is available on [try.vikunja.io](https://try.vikunja.io/api/v1/docs).

These docs are autogenerated from annotations in the code with swagger.

The specification is hosted at `http://vikunja.tld/api/v1/docs.json`.
You can use this to embed it into other OpenAPI compatible applications if you want.
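For example, to download the spec for use in another tool (the domain is a placeholder):

{{< highlight bash >}}
curl http://vikunja.tld/api/v1/docs.json -o vikunja-openapi.json
{{< /highlight >}}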
---
date: "2019-05-12:00:00+01:00"
title: "CalDAV"
title: "Caldav"
draft: false
type: "doc"
menu:
parent: "usage"
---
# CalDAV
> **Warning:** The CalDAV integration is in an early alpha stage and has bugs.
> It works well with some clients while having issues with others.
> If you encounter issues, please [report them](https://code.vikunja.io/api/issues/new?body=[caldav])
Vikunja supports managing tasks via the [caldav VTODO](https://tools.ietf.org/html/rfc5545#section-3.6.2) extension.
All urls are located under the `/dav` subspace.
Urls are:
* `/principals/<username>/`: Returns urls for project discovery. *Use this url to initially make connections to new clients.*
* `/projects/`: Used to manage projects
* `/projects/<Project ID>/`: Used to manage a single project
* `/projects/<Project ID>/<Task UID>`: Used to manage a task on a project
* `/principals/<username>/`: Returns urls for list discovery. *Use this url to initially make connections to new clients.*
* `/lists/`: Used to manage lists
* `/lists/<List ID>/`: Used to manage a single list
* `/lists/<List ID>/<Task UID>`: Used to manage a task on a list
## Supported properties
Vikunja currently supports the following properties:
* `SUMMARY`
* `DESCRIPTION`
* `PRIORITY`
* `CATEGORIES`
* `COMPLETED`
* `CREATED` (only Vikunja → Client)
* `DUE`
* `DURATION`
* `DTSTAMP`
* `DTSTART`
* `LAST-MODIFIED` (only Vikunja → Client)
* `RRULE` (Recurrence) (only Vikunja → Client)
* `VALARM` (Reminders)
* `DURATION`
* `ORGANIZER`
* `RELATED-TO`
* `CREATED`
* `DTSTAMP`
* `LAST-MODIFIED`
Vikunja **currently does not** support these properties:
* `ATTACH`
* `CATEGORIES`
* `CLASS`
* `COMMENT`
* `CONTACT`
* `GEO`
* `LOCATION`
* `ORGANIZER` (disabled)
* `PERCENT-COMPLETE`
* `RECURRENCE-ID`
* `RELATED-TO`
* `RESOURCES`
* `SEQUENCE`
* `STATUS`
* `CONTACT`
* `RECURRENCE-ID`
* `URL`
* Recurrence
* `SEQUENCE`
## Tested Clients
### Working
* [Evolution](https://wiki.gnome.org/Apps/Evolution/)
* [OpenTasks](https://opentasks.app/) & [DAVx⁵](https://www.davx5.com/)
* [Tasks (Android)](https://tasks.org/)
* [Korganizer](https://apps.kde.org/korganizer/)
### Not working
* [Thunderbird (68)](https://www.thunderbird.net/)
* iOS CalDAV Sync (See [#753](https://kolaente.dev/vikunja/vikunja/issues/753))
## Dev logs
The whole thing is not optimized at all and probably pretty inefficient.
Request body and headers are logged if the debug output is enabled.
And then it just stops.
... and complains about not being able to find the home set
... without even requesting it...
```
# Command line interface
You can interact with Vikunja using its `cli` interface.
The following commands are available:
* [dump](#dump)
If you don't specify a command, the [`web`](#web) command will be executed.
All commands use the same standard [config file]({{< ref "../setup/config.md">}}).
## Using the cli in docker
When running Vikunja in docker, you'll need to execute all commands in the `vikunja` container.
Instead of running the `vikunja` binary directly, run it like this:
```sh
docker exec <name of the vikunja container> /app/vikunja/vikunja <subcommand>
```
If you need to run a bunch of Vikunja commands, you can also create a shell alias for it:
```sh
alias vikunja-docker='docker exec <name of the vikunja container> /app/vikunja/vikunja'
```
Then use it as `vikunja-docker <subcommand>`.
### `dump`
Creates a zip file with all vikunja-related files.
This includes config, version, all files and the full database.
Usage:
{{< highlight bash >}}
$ vikunja dump
{{< /highlight >}}
### `help`
Shows more detailed help about any command.
Usage:
{{< highlight bash >}}
$ vikunja help [command]
{{< /highlight >}}
### `migrate`
Run all database migrations which didn't already run.
Usage:
{{< highlight bash >}}
$ vikunja migrate [flags]
{{< /highlight >}}
#### `migrate list`
Shows a list with all database migrations.
Usage:
{{< highlight bash >}}
$ vikunja migrate list
{{< /highlight >}}
#### `migrate rollback`
Roll migrations back until a certain point.
Usage:
{{< highlight bash >}}
$ vikunja migrate rollback [flags]
{{< /highlight >}}
Flags:
* `-n`, `--name` string: The id of the migration you want to roll back until.
### `restore`
Restores a previously created dump from a zip file, see `dump`.
Usage:
{{< highlight bash >}}
$ vikunja restore <path to dump zip file>
{{< /highlight >}}
### `testmail`
Sends a test mail using the configured smtp connection.
Usage:
{{< highlight bash >}}
$ vikunja testmail <email to send the test mail to>
{{< /highlight >}}
### `user`
Bundles a few commands to manage users.
Enable or disable a user. Will toggle the current status if no flag (`--enable` or `--disable`) is provided.
Usage:
{{< highlight bash >}}
$ vikunja user change-status <user id> <flags>
{{< /highlight >}}
Flags:
* `-d`, `--disable`: Disable the user.
Create a new user.
Usage:
{{< highlight bash >}}
$ vikunja user create <flags>
{{< /highlight >}}
Flags:
* `-a`, `--avatar-provider`: The avatar provider of the new user. Optional.
* `-p`, `--password`: The password of the new user. You will be asked to enter it if not provided through the flag.
* `-u`, `--username`: The username of the new user.
#### `user delete`
Start the user deletion process.
If called without the `--now` flag, this command will only trigger an email to the user in order for them to confirm and start the deletion process (this is the same behavoir as if the user requested their deletion via the web interface).
With the flag the user is deleted **immediately**.
**USE WITH CAUTION.**
```
$ vikunja user delete <id> <flags>
```
Flags:
* `-n`, `--now` If provided, deletes the user immediately instead of emailing them first.
#### `user list`
Shows a list of all users.
Usage:
{{< highlight bash >}}
$ vikunja user list
{{< /highlight >}}
#### `user reset-password`
Reset a user's password, either through mailing them a reset link or directly.
Usage:
{{< highlight bash >}}
$ vikunja user reset-password <flags>
{{< /highlight >}}
Flags:
* `-d`, `--direct`: If provided, reset the password directly instead of sending the user a reset mail.
Update an existing user.
Usage:
{{< highlight bash >}}
$ vikunja user update <user id>
{{< /highlight >}}
Flags:
* `-a`, `--avatar-provider`: The new avatar provider of the new user.
### `version`

Prints the version of Vikunja.
This is either the semantic version (something like `0.7`) or version + git commit hash.
Usage:
{{< highlight bash >}}
$ vikunja version
{{< /highlight >}}
### `web`
Starts Vikunja's REST api server.
Usage:
{{< highlight bash >}}
$ vikunja web
{{< /highlight >}}
---
title: "Errors"
draft: false
type: "doc"
menu:
  sidebar:
    parent: "usage"
---
# Errors
This document describes the different errors Vikunja can return.

## User
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 1001 | 400 | A user with this username already exists. |
| 1002 | 400 | A user with this email address already exists. |
| 1004 | 400 | No username and password specified. |
| 1005 | 404 | The user does not exist. |
| 1006 | 400 | Could not get the user id. |
| 1008 | 412 | No password reset token provided. |
| 1009 | 412 | Invalid password reset token. |
| 1010 | 412 | Invalid email confirm token. |
| 1011 | 412 | Wrong username or password. |
| 1012 | 412 | Email address of the user not confirmed. |
| 1013 | 412 | New password is empty. |
| 1014 | 412 | Old password is empty. |
| 1015 | 412 | Totp is already enabled for this user. |
| 1016 | 412 | Totp is not enabled for this user. |
| 1017 | 412 | The provided Totp passcode is invalid. |
| 1018 | 412 | The provided user avatar provider type setting is invalid. |
| 1019 | 412 | No openid email address was provided. |
| 1020 | 412 | This user account is disabled. |
| 1021 | 412 | This account is managed by a third-party authentication provider. |
| 1021 | 412 | The username must not contain spaces. |
| 1022 | 412 | The custom scope set by the OIDC provider is malformed. Please make sure the openid provider sets the data correctly for your scope. Check especially to have set an oidcID. |
## Validation
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 2001 | 400 | ID cannot be empty or 0. |
| 2002 | 400 | Some of the request data was invalid. The response contains an additional array with all invalid fields. |
## Project
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|------------------------------------------------------------------------------------------------------------------------------------|
| 3001 | 404 | The project does not exist. |
| 3004 | 403 | The user needs to have read permissions on that project to perform that action. |
| 3005 | 400 | The project title cannot be empty. |
| 3006 | 404 | The project share does not exist. |
| 3007 | 400 | A project with this identifier already exists. |
| 3008 | 412 | The project is archived and can therefore only be accessed read only. This is also true for all tasks associated with this project. |
| 3009 | 412 | The project cannot belong to a dynamically generated parent project like "Favorites". |
| 3010 | 412 | This project cannot be a child of itself. |
| 3011 | 412 | This project cannot have a cyclic relationship to a parent project. |
| 3012 | 412 | This project cannot be deleted because a user has set it as their default project. |
| 3013 | 412 | This project cannot be archived because a user has set it as their default project. |
| 3014 | 404 | This project view does not exist. |
## Task
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|----------------------------------------------------------------------------|
| 4001 | 400 | The project task text cannot be empty. |
| 4002 | 404 | The project task does not exist. |
| 4003 | 403 | All bulk editing tasks must belong to the same project. |
| 4004 | 403 | Need at least one task when bulk editing tasks. |
| 4005 | 403 | The user does not have the right to see the task. |
| 4006 | 403 | The user tried to set a parent task as the task itself. |
| 4007 | 400 | The user tried to create a task relation with an invalid kind of relation. |
| 4008 | 409 | The user tried to create a task relation which already exists. |
| 4009 | 404 | The task relation does not exist. |
| 4010 | 400 | Cannot relate a task with itself. |
| 4011 | 404 | The task attachment does not exist. |
| 4012 | 400 | The task attachment is too large. |
| 4013 | 400 | The task sort param is invalid. |
| 4014 | 400 | The task sort order is invalid. |
| 4015 | 404 | The task comment does not exist. |
| 4016 | 400 | Invalid task field. |
| 4017 | 400 | Invalid task filter comparator. |
| 4018 | 400 | Invalid task filter concatenator. |
| 4019 | 400 | Invalid task filter value. |
| 4020 | 400 | The provided attachment does not belong to that task. |
| 4021 | 400 | This user is already assigned to that task. |
| 4022 | 400 | The task has a relative reminder which does not specify relative to what. |
| 4023 | 409 | Tried to create a task relation which would create a cycle. |
| 4024 | 400 | The provided filter expression is invalid. |
| 4025 | 400 | The reaction kind is invalid. |
| 4026 | 400 | You must provide a project view ID when sorting by position. |
## Team
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|----------------------------------------------------------------------|
| 6001 | 400 | The team name cannot be empty. |
| 6002 | 404 | The team does not exist. |
| 6004 | 409 | The team already has access to that project. |
| 6005 | 409 | The user is already a member of that team. |
| 6006 | 400 | Cannot delete the last team member. |
| 6007 | 403 | The team does not have access to the project to perform that action. |
| 6008 | 400 | There are no teams found with that team name. |
| 6009 | 400 | There is no oidc team with that team name and oidcId. |
| 6010 | 400 | There are no oidc teams found for the user. |
## User Project Access
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------------------------------------------|
| 7002 | 409 | The user already has access to that project. |
| 7003 | 403 | The user does not have access to that project. |
## List
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 3001 | 404 | The list does not exist. |
| 3004 | 403 | The user needs to have read permissions on that list to perform that action. |
| 3005 | 400 | The list title cannot be empty. |
| 3006 | 404 | The list share does not exist. |
| 3007 | 400 | A list with this identifier already exists. |
| 3008 | 412 | The list is archived and can therefore only be accessed read only. This is also true for all tasks associated with this list. |
## Task
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 4001 | 400 | The list task text cannot be empty. |
| 4002 | 404 | The list task does not exist. |
| 4003 | 403 | All bulk editing tasks must belong to the same list. |
| 4004 | 403 | Need at least one task when bulk editing tasks. |
| 4005 | 403 | The user does not have the right to see the task. |
| 4006 | 403 | The user tried to set a parent task as the task itself. |
| 4007 | 400 | The user tried to create a task relation with an invalid kind of relation. |
| 4008 | 409 | The user tried to create a task relation which already exists. |
| 4009 | 404 | The task relation does not exist. |
| 4010 | 400 | Cannot relate a task with itself. |
| 4011 | 404 | The task attachment does not exist. |
| 4012 | 400 | The task attachment is too large. |
| 4013 | 400 | The task sort param is invalid. |
| 4014 | 400 | The task sort order is invalid. |
| 4015 | 404 | The task comment does not exist. |
| 4016 | 403 | Invalid task field. |
| 4017 | 403 | Invalid task filter comparator. |
| 4018 | 403 | Invalid task filter concatenator. |
| 4019 | 403 | Invalid task filter value. |
## Namespace
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 5001 | 404 | The namespace does not exist. |
| 5003 | 403 | The user does not have access to the specified namespace. |
| 5006 | 400 | The namespace name cannot be empty. |
| 5009 | 403 | The user needs to have namespace read access to perform that action. |
| 5010 | 403 | This team does not have access to that namespace. |
| 5011 | 409 | This user already has access to that namespace. |
| 5012 | 412 | The namespace is archived and can therefore only be accessed read only. |
## Team
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 6001 | 400 | The team name cannot be empty. |
| 6002 | 404 | The team does not exist. |
| 6004 | 409 | The team already has access to that namespace or list. |
| 6005 | 409 | The user is already a member of that team. |
| 6006 | 400 | Cannot delete the last team member. |
| 6007 | 403 | The team does not have access to the list to perform that action. |
## User List Access
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 7002 | 409 | The user already has access to that list. |
| 7003 | 403 | The user does not have access to that list. |
## Label
@@ -134,24 +125,24 @@ This document describes the different errors Vikunja can return.
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 9001 | 403 | The right is invalid. |
## Kanban
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 10001 | 404 | The bucket does not exist. |
| 10002 | 400 | The bucket does not belong to that project. |
| 10003 | 412 | You cannot remove the last bucket on a project. |
| 10002 | 400 | The bucket does not belong to that list. |
| 10003 | 412 | You cannot remove the last bucket on a list. |
| 10004 | 412 | You cannot add the task to this bucket as it already exceeded the limit of tasks it can hold. |
| 10005 | 412 | There can be only one done bucket per project. |
| 10005 | 412 | There can be only one done bucket per list. |
## Saved Filters
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|-------------|
| 11001 | 404 | The saved filter does not exist. |
| 11002 | 412 | Saved filters are not available for link shares. |
## Subscriptions
@@ -162,8 +153,7 @@ This document describes the different errors Vikunja can return.
## Link Shares
| ErrorCode | HTTP Status Code | Description |
|-----------|------------------|--------------------------------------------------------------------------------|
| 13001 | 412 | This link share requires a password for authentication, but none was provided. |
| 13002 | 403 | The provided link share password is invalid. |
| 13003 | 400 | The provided link share token is invalid. |
| 13002 | 403 | The provided link share password was invalid. |

@@ -1,67 +0,0 @@
---
title: "Filters"
date: 2024-03-09T19:51:32+02:00
draft: false
type: doc
menu:
sidebar:
parent: "usage"
---
# Filter Syntax
To filter tasks via the api, you can use a query syntax similar to SQL.
This document is about filtering via the api. To filter in Vikunja's web ui, check out the help text below the filter query input.
{{< table_of_contents >}}
## Available fields
The available fields for filtering include:
* `done`: Whether the task is completed or not
* `priority`: The priority level of the task (1-5)
* `percentDone`: The percentage of completion for the task (0-100)
* `dueDate`: The due date of the task
* `startDate`: The start date of the task
* `endDate`: The end date of the task
* `doneAt`: The date and time when the task was completed
* `assignees`: The assignees of the task
* `labels`: The labels associated with the task
* `project`: The project the task belongs to (only available for saved filters, not on a project level)
You can use date math to set relative dates. Click on the date value in a query to find out more.
All strings must be either single-word or enclosed in `"` or `'`. This extends to date values like `2024-03-11`.
## Operators
The available operators for filtering include:
* `!=`: Not equal to
* `=`: Equal to
* `>`: Greater than
* `>=`: Greater than or equal to
* `<`: Less than
* `<=`: Less than or equal to
* `like`: Matches a pattern (using wildcard `%`)
* `in`: Matches any value in a comma-separated list of values
To combine multiple conditions, you can use the following logical operators:
* `&&`: AND operator, matches if all conditions are true
* `||`: OR operator, matches if any of the conditions are true
* `(` and `)`: Parentheses for grouping conditions
## Examples
Here are some examples of filter queries:
* `priority = 4`: Matches tasks with priority level 4
* `dueDate < now`: Matches tasks with a due date in the past
* `done = false && priority >= 3`: Matches undone tasks with priority level 3 or higher
* `assignees in [user1, user2]`: Matches tasks assigned to either "user1" or "user2"
* `(priority = 1 || priority = 2) && dueDate <= now`: Matches tasks with priority level 1 or 2 and a due date in the past
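As a rough sketch of how such a query reaches the API, the Go snippet below requests all tasks matching one of the example filters above. It assumes a `filter` query parameter on the `/api/v1/tasks/all` endpoint and a bearer API token; the exact endpoint and parameter name depend on your Vikunja version, so verify them against your instance's API docs.
```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical instance URL and API token - replace both with your own.
	base := "https://try.vikunja.io/api/v1"
	token := "tk_xxxxxxxx"

	// The whole filter expression is URL-encoded so quotes and spaces survive transport.
	filter := `done = false && priority >= 3`
	endpoint := base + "/tasks/all?filter=" + url.QueryEscape(filter)

	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```
Encoding the whole expression as one query value keeps operators such as `&&` from being interpreted as query-string separators.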

@@ -1,43 +0,0 @@
---
title: "n8n"
date: 2023-10-24T19:31:35+02:00
draft: false
menu:
sidebar:
parent: "usage"
---
# Using Vikunja with n8n
Vikunja maintains a [community node](https://github.com/go-vikunja/n8n-vikunja-nodes) for [n8n](https://n8n.io),
allowing you to easily integrate Vikunja with all kinds of other tools and services.
{{< table_of_contents >}}
## Installation
To install the node in your n8n installation:
1. In your n8n instance, go to **Settings > Community Nodes**.
2. Select Install.
3. Enter `n8n-nodes-vikunja` as the npm Package Name.
4. Agree to the risks of using community nodes: select I understand the risks of installing unverified code from a
public source.
5. Select Install. n8n installs the node, and returns to the Community Nodes list in Settings.
6. Vikunja actions and triggers are now available in n8n.
[Official n8n docs about the installation](https://docs.n8n.io/integrations/community-nodes/installation/)
## Authentication
To authenticate your automation against Vikunja:
1. In Vikunja, go to **Settings > API Tokens** and create a new token. Use all scopes for the kind of task you want to
do. \
*Note:* If you want to use the webhook trigger node, the api token should have permissions to create, read and delete
webhooks.
2. Now in n8n, go to **Credentials** and then click on **Add Credential**.
3. Search for `Vikunja API` and click *Continue*.
4. Enter the API key you created in step 1.
5. Enter the API URL of your Vikunja instance, with `/api/v1` suffix.
6. When you now create a Vikunja node, select the created credentials.

@@ -10,16 +10,16 @@ menu:
# Available task relation kinds
| Code | Description | Opposite of |
|------|-------------|-------------|
| `subtask` | Task is a subtask of the other task. | `parenttask` |
| `parenttask` | Task is a parent task of the other task. | `subtask` |
| `related` | Both tasks are related to each other.<br /> How exactly is not specified. | ⸺ |
| `duplicateof` | Task is a duplicate of the other task. | `duplicates` |
| `duplicates` | Task duplicates the other task. | `duplicateof` |
| `blocking` | Task is blocking the other task. | `blocked` |
| `blocked` | Task is blocked by the other task. | `blocking` |
| `precedes` | Task precedes the other task. | `follows` |
| `follows` | Task follows the other task. | `precedes` |
| `copiedfrom` | Task is copied from the other task. | `copiedto` |
| `copiedto` | Task is copied to the other task. | `copiedfrom` |
| Code | Description |
|------|-------------|
| subtask | Task is a subtask of the other task. This is the opposite of `parenttask`. |
| parenttask | Task is a parent task of the other task. This is the opposite of `subtask`. |
| related | Both tasks are related to each other. How exactly is not specified. |
| duplicateof | Task is a duplicate of the other task. This is the opposite of `duplicates`. |
| duplicates | Task duplicates the other task. This is the opposite of `duplicateof`. |
| blocking | Task is blocking the other task. This is the opposite of `blocked`. |
| blocked | Task is blocked by the other task. This is the opposite of `blocking`. |
| precedes | Task precedes the other task. This is the opposite of `follows`. |
| follows | Task follows the other task. This is the opposite of `precedes`. |
| copiedfrom | Task is copied from the other task. This is the opposite of `copiedto`. |
| copiedto | Task is copied to the other task. This is the opposite of `copiedfrom`. |
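As a hedged illustration of how these relation kinds are used, the Go snippet below marks one task as a `subtask` of another. It assumes a `PUT /api/v1/tasks/{id}/relations` route with `other_task_id` and `relation_kind` fields in the body; treat the route and field names as assumptions and check them against the API docs of your Vikunja version.
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical values - adjust the instance URL, token and task IDs.
	base := "https://try.vikunja.io/api/v1"
	token := "tk_xxxxxxxx"
	taskID := 42

	// Make task 42 a subtask of task 7 (kind `subtask`, whose opposite is `parenttask`).
	payload, _ := json.Marshal(map[string]any{
		"other_task_id": 7,
		"relation_kind": "subtask",
	})

	req, err := http.NewRequest(http.MethodPut,
		fmt.Sprintf("%s/tasks/%d/relations", base, taskID),
		bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```
Swapping `relation_kind` for any other code from the table above works the same way.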

@@ -8,22 +8,22 @@ menu:
parent: "usage"
---
# Project rights for teams and users
# List and namespace rights for teams and users
Whenever you share a project with a user or team, you can specify a `rights` parameter.
Whenever you share a list or namespace with a user or team, you can specify a `rights` parameter.
This parameter controls the rights that team or user is going to have (or has, if you request the current sharing status).
Rights are specified using integers.
The following values are possible:
| Right (int) | Meaning |
|-------------|-------------------------------------------------------------------------------------------------|
| 0 (Default) | Read only. Anything which is shared with this right cannot be edited. |
| 1 | Read and write. Projects shared with this right can be read and written to by the team or user. |
| 2 | Admin. Can do anything like read and write, but can additionally manage sharing options. |
| Right (int) | Meaning |
|-------------|---------|
| 0 (Default) | Read only. Anything which is shared with this right cannot be edited. |
| 1 | Read and write. Namespaces or lists shared with this right can be read and written to by the team or user. |
| 2 | Admin. Can do anything like read and write, but can additionally manage sharing options. |
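As a minimal sketch of how the `rights` parameter is passed when sharing, the Go snippet below shares a project with a user with read and write access (`right = 1`). It assumes the newer `PUT /api/v1/projects/{id}/users` route with `user_id` and `right` fields in the body (older versions used `/lists/{id}/users`); both the route and the field names are assumptions to verify against your instance.
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical values - adjust the instance URL, token, project ID and username.
	base := "https://try.vikunja.io/api/v1"
	token := "tk_xxxxxxxx"
	projectID := 1

	// right = 1 grants read and write access; 0 is read only, 2 is admin.
	payload, _ := json.Marshal(map[string]any{
		"user_id": "some-user",
		"right":   1,
	})

	req, err := http.NewRequest(http.MethodPut,
		fmt.Sprintf("%s/projects/%d/users", base, projectID),
		bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```
Sending `right: 0` or `right: 2` instead grants read-only or admin access, matching the table above.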
## Team admins
When adding or querying a team, every member has an additional boolean value stating if it is admin or not.
A team admin can also add and remove team members and change whether a user in the team is admin or not.

@@ -1,58 +0,0 @@
---
title: "Webhooks"
date: 2023-10-17T19:51:32+02:00
draft: false
type: doc
menu:
sidebar:
parent: "usage"
---
# Webhooks
Starting with version 0.22.0, Vikunja allows you to define webhooks to notify other services of events happening within Vikunja.
{{< table_of_contents >}}
## How to create webhooks
To create a webhook, select "Webhooks" in the project options. The form allows you to create and modify webhooks.
Check out [the api docs](https://try.vikunja.io/api/v1/docs#tag/webhooks) for information about how to create webhooks programmatically.
## Available events and their payload
All events registered as webhook events in [the event listeners definition](https://kolaente.dev/vikunja/vikunja/src/branch/main/pkg/models/listeners.go#L69) can be used as a webhook target.
A webhook payload will look similar to this:
```json
{
"event_name": "task.created",
"time": "2023-10-17T19:39:32.924194436+02:00",
"data": {}
}
```
The `data` property will contain the raw event data as it was registered in the `listeners.go` file.
The `time` property holds the time when the webhook payload data was sent.
It always uses the ISO 8601 format with date, time and time zone offset.
## Security considerations
### Signing
Vikunja allows you to provide a secret when creating the webhook.
If you set a secret, all outgoing webhook requests will contain an `X-Vikunja-Signature` header with an HMAC signature over the webhook json payload.
Check out [webhooks.fyi](https://webhooks.fyi/security/hmac) for more information about how to validate the HMAC signature.
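As a sketch of the receiving side, the Go handler below recomputes the signature over the raw request body and rejects requests whose `X-Vikunja-Signature` header does not match. It assumes the signature is a hex-encoded HMAC-SHA256 of the unmodified JSON payload; confirm the hash and encoding against your Vikunja version before relying on this.
```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"log"
	"net/http"
)

// secret must match the secret configured on the webhook in Vikunja.
var secret = []byte("my-webhook-secret")

func webhookHandler(w http.ResponseWriter, r *http.Request) {
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "cannot read body", http.StatusBadRequest)
		return
	}

	// Recompute the HMAC over the raw payload and compare it to the header value.
	mac := hmac.New(sha256.New, secret)
	mac.Write(body)
	expected := hex.EncodeToString(mac.Sum(nil))
	got := r.Header.Get("X-Vikunja-Signature")

	if !hmac.Equal([]byte(expected), []byte(got)) {
		http.Error(w, "invalid signature", http.StatusUnauthorized)
		return
	}

	// The signature checks out - handle the event payload here.
	w.WriteHeader(http.StatusNoContent)
}

func main() {
	http.HandleFunc("/vikunja-webhook", webhookHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
Comparing with `hmac.Equal` keeps the check constant-time, which is the usual recommendation for signature validation.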
### Hosting webhook infrastructure
Vikunja supports using [mole](https://github.com/frain-dev/mole) as a proxy for outgoing webhook requests.
This allows you to prevent SSRF attacks on your own infrastructure.
You should use this and [configure it appropriately]({{< ref "../setup/config.md">}}#webhooks) if you're not the only one using your Vikunja instance.
Check out [webhooks.fyi](https://webhooks.fyi/best-practices/webhook-providers#implement-security-on-egress-communication) for more information about the attack vector and reasoning to prevent this.

@@ -18,16 +18,4 @@ server {
location /docs/contact {
return 301 $scheme://vikunja.io/en/contact;
}
location /docs/docs {
return 301 $scheme://vikunja.io/docs;
}
location /docs/install-backend {
return 301 $scheme://vikunja.io/docs/installing;
}
location /docs/install-frontend {
return 301 $scheme://vikunja.io/docs/installing;
}
}

@@ -1,25 +0,0 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1712449641,
"narHash": "sha256-U9DDWMexN6o5Td2DznEgguh8TRIUnIl9levmit43GcI=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "600b15aea1b36eeb43833a50b0e96579147099ff",
"type": "github"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

@@ -1,20 +0,0 @@
{
description = "Vikunja dev environment";
outputs = { self, nixpkgs }:
let pkgs = nixpkgs.legacyPackages.x86_64-linux;
in {
defaultPackage.x86_64-linux =
pkgs.mkShell { buildInputs = with pkgs; [
# General tools
git-cliff
# Frontend tools
nodePackages.pnpm cypress
# API tools
go golangci-lint mage
# Desktop
electron
];
};
};
}

@@ -1,29 +0,0 @@
# EditorConfig is awesome: https://EditorConfig.org
# top-most EditorConfig file
root = true
[*]
indent_style = tab
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = false
insert_final_newline = false
[*.vue]
indent_style = tab
[*.{yaml,yml}]
indent_style = space
indent_size = 2
[*.json]
indent_style = space
indent_size = 2
[*.{scss,css}]
indent_style = space
indent_size = 2
[.nvmrc]
insert_final_newline = false

Some files were not shown because too many files have changed in this diff.